ngram
listlengths 0
67.8k
|
|---|
[
"base_app.models import BankSettings from rest_framework import serializers class BankSettingsDetailsSerializer(serializers.ModelSerializer): '''Serializer for a bank",
"from rest_framework import serializers class BankSettingsDetailsSerializer(serializers.ModelSerializer): '''Serializer for a bank settings. This serializer",
"a bank settings. This serializer provides detailed information about bank settings. ''' class",
"This serializer provides detailed information about bank settings. ''' class Meta: model =",
"serializer provides detailed information about bank settings. ''' class Meta: model = BankSettings",
"provides detailed information about bank settings. ''' class Meta: model = BankSettings fields",
"import serializers class BankSettingsDetailsSerializer(serializers.ModelSerializer): '''Serializer for a bank settings. This serializer provides detailed",
"import BankSettings from rest_framework import serializers class BankSettingsDetailsSerializer(serializers.ModelSerializer): '''Serializer for a bank settings.",
"from base_app.models import BankSettings from rest_framework import serializers class BankSettingsDetailsSerializer(serializers.ModelSerializer): '''Serializer for a",
"serializers class BankSettingsDetailsSerializer(serializers.ModelSerializer): '''Serializer for a bank settings. This serializer provides detailed information",
"'''Serializer for a bank settings. This serializer provides detailed information about bank settings.",
"rest_framework import serializers class BankSettingsDetailsSerializer(serializers.ModelSerializer): '''Serializer for a bank settings. This serializer provides",
"about bank settings. ''' class Meta: model = BankSettings fields = ['curr_bank_day'] read_only_fields",
"settings. ''' class Meta: model = BankSettings fields = ['curr_bank_day'] read_only_fields = ['curr_bank_day']",
"detailed information about bank settings. ''' class Meta: model = BankSettings fields =",
"BankSettings from rest_framework import serializers class BankSettingsDetailsSerializer(serializers.ModelSerializer): '''Serializer for a bank settings. This",
"for a bank settings. This serializer provides detailed information about bank settings. '''",
"bank settings. This serializer provides detailed information about bank settings. ''' class Meta:",
"bank settings. ''' class Meta: model = BankSettings fields = ['curr_bank_day'] read_only_fields =",
"settings. This serializer provides detailed information about bank settings. ''' class Meta: model",
"BankSettingsDetailsSerializer(serializers.ModelSerializer): '''Serializer for a bank settings. This serializer provides detailed information about bank",
"information about bank settings. ''' class Meta: model = BankSettings fields = ['curr_bank_day']",
"class BankSettingsDetailsSerializer(serializers.ModelSerializer): '''Serializer for a bank settings. This serializer provides detailed information about"
] |
[
"'rb') as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_to_parameter_dict(self): expected_active_parameters =",
"= ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) setups = {} for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file),",
"as fp: setup = pickle.load(fp) setups[setup.setup_id] = setup # all flow ids are",
"def test_filter_setup_list_nominal_numeric(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm",
"ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) setups = {} for setup_file",
"'rb') as fp: flow = pickle.load(fp) result = openmlcontrib.setups.setup_to_parameter_dict(setup, flow, True, cs) self.assertEqual(expected_active_parameters[result['classifier__kernel']],",
"fp: setup = pickle.load(fp) setups[setup.setup_id] = setup # all flow ids are supposed",
"set(poly_setups.keys()) sigm_ids = set(sigm_setups.keys()) inters = poly_ids.intersection(sigm_ids) self.assertEqual(len(inters), 0) self.assertGreater(len(poly_ids) + len(sigm_ids), 20)",
"for i in range(1, 30)] setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) self.assertEqual(set(setups.keys()), set(setup_ids)) def test_obtain_setups_by_ids_incomplete_raise(self):",
"with self.assertRaises(ValueError): setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) def test_obtain_setups_by_ids_incomplete(self):",
"= \"https://www.openml.org/api/v1/xml/\" self.test_server = \"https://test.openml.org/api/v1/xml/\" openml.config.server = self.test_server openml.config.apikey = '' def test_obtain_setups_by_ids(self):",
"with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs))",
"all_ids = set(poly_setups.keys()) inters = smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters), 0) self.assertEqual(len(smaller_ids) + len(bigger_ids), len(all_ids)) def",
"TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup =",
"pickle.load(fp) setups[setup.setup_id] = setup # all flow ids are supposed to be the",
"0) def test_setup_in_configuration_space_illegal_value(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree])",
"self.assertEqual(set(setups.keys()), set(setup_ids)) def test_obtain_setups_by_ids_incomplete_raise(self): with self.assertRaises(ValueError): setup_ids = [i for i in range(30)]",
"self.assertGreater(len(sigm_ids), 10) poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly']) self.assertEqual(poly_ids, set(poly_setups_prime.keys())) def test_filter_setup_list_nominal_numeric(self): openml.config.server =",
"self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups), len(setups_filtered)) def test_filter_setup_list_by_config_space_fails(self): degree =",
"def test_filter_setup_list_by_config_space_fails(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) setups",
"for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7) def test_filter_setup_list_nominal(self): openml.config.server = self.live_server setupid_setup",
"self.test_server = \"https://test.openml.org/api/v1/xml/\" openml.config.server = self.test_server openml.config.apikey = '' def test_obtain_setups_by_ids(self): setup_ids =",
"setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups), len(setups_filtered)) def test_filter_setup_list_by_config_space_fails(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5,",
"i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7) def test_filter_setup_list_nominal(self): openml.config.server = self.live_server setupid_setup =",
"open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered =",
"sigm_ids = set(sigm_setups.keys()) inters = poly_ids.intersection(sigm_ids) self.assertEqual(len(inters), 0) self.assertGreater(len(poly_ids) + len(sigm_ids), 20) self.assertGreater(len(poly_ids),",
"i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) def test_obtain_setups_by_ids_incomplete(self): setup_ids = [i for i in",
"setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc threshold = 3 poly_setups",
"flow, cs)) def test_setup_in_configuration_space_param_not_present(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20, 20, default_value=-3) cs = ConfigSpace.ConfigurationSpace()",
"ConfigSpace import openml import openmlcontrib import os import pickle from openmlcontrib.testing import TestBase",
"= TestBase._get_libsvm_svc_config_space() setups = {} for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb')",
"test_filter_setup_list_nominal_numeric(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc",
"flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_in_configuration_space_param_not_present(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20, 20,",
"= TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup",
"0) self.assertGreater(len(poly_ids) + len(sigm_ids), 20) self.assertGreater(len(poly_ids), 10) self.assertGreater(len(sigm_ids), 10) poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel',",
"setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel',",
"pipeline with libsvm svc threshold = 3 poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) poly_setups_smaller",
"self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups_filtered), 0) def test_setup_in_configuration_space_illegal_value(self): degree =",
"= pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_to_parameter_dict(self): expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters() cs = TestBase._get_libsvm_svc_config_space()",
"'rb') as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_in_configuration_space_param_not_present(self): degree =",
"setup_ids = [i for i in range(1, 30)] setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) self.assertEqual(set(setups.keys()),",
"def test_obtain_setups_by_ids_incomplete_raise(self): with self.assertRaises(ValueError): setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7)",
"len(bigger_ids), len(all_ids)) def test_setup_in_configuration_space(self): cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups',",
"setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_in_configuration_space_param_not_present(self): degree",
"cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup =",
"fp: flow = pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups_filtered), 0)",
"= pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups_filtered), 0) def test_setup_in_configuration_space_illegal_value(self):",
"[i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7) def test_filter_setup_list_nominal(self): openml.config.server = self.live_server",
"TestBase._get_libsvm_svc_config_space() setups = {} for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as",
"pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups), len(setups_filtered)) def test_filter_setup_list_by_config_space_fails(self): degree",
"\"https://test.openml.org/api/v1/xml/\" openml.config.server = self.test_server openml.config.apikey = '' def test_obtain_setups_by_ids(self): setup_ids = [i for",
"'rb') as fp: setup = pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp:",
"= \"https://test.openml.org/api/v1/xml/\" openml.config.server = self.test_server openml.config.apikey = '' def test_obtain_setups_by_ids(self): setup_ids = [i",
"bigger_ids = set(poly_setups_bigger.keys()) all_ids = set(poly_setups.keys()) inters = smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters), 0) self.assertEqual(len(smaller_ids) +",
"= openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly']) self.assertEqual(poly_ids, set(poly_setups_prime.keys())) def test_filter_setup_list_nominal_numeric(self): openml.config.server = self.live_server setupid_setup =",
"with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs))",
"set(sigm_setups.keys()) inters = poly_ids.intersection(sigm_ids) self.assertEqual(len(inters), 0) self.assertGreater(len(poly_ids) + len(sigm_ids), 20) self.assertGreater(len(poly_ids), 10) self.assertGreater(len(sigm_ids),",
"= '' def test_obtain_setups_by_ids(self): setup_ids = [i for i in range(1, 30)] setups",
"= openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups_filtered), 0) def test_setup_in_configuration_space_illegal_value(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1,",
"cs) self.assertEqual(len(setups), len(setups_filtered)) def test_filter_setup_list_by_config_space_fails(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs =",
"test_setup_in_configuration_space_illegal_value(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file",
"as fp: flow = pickle.load(fp) result = openmlcontrib.setups.setup_to_parameter_dict(setup, flow, True, cs) self.assertEqual(expected_active_parameters[result['classifier__kernel']], set(result.keys()))",
"as fp: flow = pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups),",
"with libsvm svc poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid'])",
"setup # all flow ids are supposed to be the same with open('../data/flows/%d.pkl'",
"pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) result =",
"test_setup_to_parameter_dict(self): expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters() cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups',",
"os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup = pickle.load(fp) setups[setup.setup_id] = setup",
"+ len(sigm_ids), 20) self.assertGreater(len(poly_ids), 10) self.assertGreater(len(sigm_ids), 10) poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly']) self.assertEqual(poly_ids,",
"30)] setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) self.assertEqual(set(setups.keys()), set(setup_ids)) def test_obtain_setups_by_ids_incomplete_raise(self): with self.assertRaises(ValueError): setup_ids =",
"= self.test_server openml.config.apikey = '' def test_obtain_setups_by_ids(self): setup_ids = [i for i in",
"setups = {} for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp:",
"degree = ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20, 20, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in",
"inters = smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters), 0) self.assertEqual(len(smaller_ids) + len(bigger_ids), len(all_ids)) def test_setup_in_configuration_space(self): cs =",
"pickle from openmlcontrib.testing import TestBase class TestSetupFunctions(TestBase): def setUp(self): self.live_server = \"https://www.openml.org/api/v1/xml/\" self.test_server",
"cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) setups = {} for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups',",
"openmlcontrib.testing import TestBase class TestSetupFunctions(TestBase): def setUp(self): self.live_server = \"https://www.openml.org/api/v1/xml/\" self.test_server = \"https://test.openml.org/api/v1/xml/\"",
"self.assertEqual(len(inters), 0) self.assertGreater(len(poly_ids) + len(sigm_ids), 20) self.assertGreater(len(poly_ids), 10) self.assertGreater(len(sigm_ids), 10) poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups,",
"cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as",
"self.assertEqual(len(setups), len(setups_filtered)) def test_filter_setup_list_by_config_space_fails(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace()",
"self.test_server openml.config.apikey = '' def test_obtain_setups_by_ids(self): setup_ids = [i for i in range(1,",
"'' def test_obtain_setups_by_ids(self): setup_ids = [i for i in range(1, 30)] setups =",
"flow, cs)) def test_setup_to_parameter_dict(self): expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters() cs = TestBase._get_libsvm_svc_config_space() for setup_file in",
"self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_in_configuration_space_param_not_present(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20, 20, default_value=-3) cs =",
"= set(poly_setups_smaller.keys()) bigger_ids = set(poly_setups_bigger.keys()) all_ids = set(poly_setups.keys()) inters = smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters), 0)",
"0) self.assertEqual(len(smaller_ids) + len(bigger_ids), len(all_ids)) def test_setup_in_configuration_space(self): cs = TestBase._get_libsvm_svc_config_space() for setup_file in",
"same with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertEqual(len(setups), 20)",
"poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly']) self.assertEqual(poly_ids, set(poly_setups_prime.keys())) def test_filter_setup_list_nominal_numeric(self): openml.config.server = self.live_server setupid_setup",
"= pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup,",
"= openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid']) poly_ids = set(poly_setups.keys()) sigm_ids",
"poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1) smaller_ids = set(poly_setups_smaller.keys()) bigger_ids = set(poly_setups_bigger.keys()) all_ids =",
"= openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc threshold = 3 poly_setups =",
"openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) sigm_setups",
"'kernel', allowed_values=['sigmoid']) poly_ids = set(poly_setups.keys()) sigm_ids = set(sigm_setups.keys()) inters = poly_ids.intersection(sigm_ids) self.assertEqual(len(inters), 0)",
"set(poly_setups.keys()) inters = smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters), 0) self.assertEqual(len(smaller_ids) + len(bigger_ids), len(all_ids)) def test_setup_in_configuration_space(self): cs",
"pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups_filtered), 0) def test_setup_in_configuration_space_illegal_value(self): degree",
"setup.flow_id, 'rb') as fp: flow = pickle.load(fp) result = openmlcontrib.setups.setup_to_parameter_dict(setup, flow, True, cs)",
"threshold = 3 poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold)",
"as fp: flow = pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups_filtered),",
"= [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7) def test_filter_setup_list_nominal(self): openml.config.server =",
"# all flow ids are supposed to be the same with open('../data/flows/%d.pkl' %",
"= [i for i in range(1, 30)] setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) self.assertEqual(set(setups.keys()), set(setup_ids))",
"-5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups',",
"test_setup_in_configuration_space(self): cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as",
"test_obtain_setups_by_ids(self): setup_ids = [i for i in range(1, 30)] setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7)",
"setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7) def test_filter_setup_list_nominal(self): openml.config.server",
"set(poly_setups_smaller.keys()) bigger_ids = set(poly_setups_bigger.keys()) all_ids = set(poly_setups.keys()) inters = smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters), 0) self.assertEqual(len(smaller_ids)",
"import pickle from openmlcontrib.testing import TestBase class TestSetupFunctions(TestBase): def setUp(self): self.live_server = \"https://www.openml.org/api/v1/xml/\"",
"% setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_to_parameter_dict(self):",
"ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup",
"with libsvm svc threshold = 3 poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) poly_setups_smaller =",
"'kernel', allowed_values=['poly']) poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold) poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1) smaller_ids",
"ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'): with",
"os import pickle from openmlcontrib.testing import TestBase class TestSetupFunctions(TestBase): def setUp(self): self.live_server =",
"openml.config.apikey = '' def test_obtain_setups_by_ids(self): setup_ids = [i for i in range(1, 30)]",
"in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7) def test_filter_setup_list_nominal(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707,",
"-1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) setups = {} for setup_file in os.listdir('../data/setups'):",
"with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup = pickle.load(fp) setups[setup.setup_id] = setup #",
"libsvm svc threshold = 3 poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups,",
"expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters() cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file),",
"def test_setup_in_configuration_space_illegal_value(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for",
"from openmlcontrib.testing import TestBase class TestSetupFunctions(TestBase): def setUp(self): self.live_server = \"https://www.openml.org/api/v1/xml/\" self.test_server =",
"in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup = pickle.load(fp) with open('../data/flows/%d.pkl'",
"3 poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold) poly_setups_bigger =",
"default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) setups = {} for setup_file in os.listdir('../data/setups'): with",
"% setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_in_configuration_space_param_not_present(self):",
"= ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20, 20, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'):",
"= pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup,",
"class TestSetupFunctions(TestBase): def setUp(self): self.live_server = \"https://www.openml.org/api/v1/xml/\" self.test_server = \"https://test.openml.org/api/v1/xml/\" openml.config.server = self.test_server",
"= openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly'])",
"def test_filter_setup_list_nominal(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm",
"'degree', max=threshold) poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1) smaller_ids = set(poly_setups_smaller.keys()) bigger_ids = set(poly_setups_bigger.keys())",
"flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_to_parameter_dict(self): expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters() cs =",
"= ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp:",
"TestBase._libsvm_expected_active_hyperparameters() cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as",
"def test_obtain_setups_by_ids_incomplete(self): setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7) def",
"test_setup_in_configuration_space_param_not_present(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20, 20, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file",
"poly_ids.intersection(sigm_ids) self.assertEqual(len(inters), 0) self.assertGreater(len(poly_ids) + len(sigm_ids), 20) self.assertGreater(len(poly_ids), 10) self.assertGreater(len(sigm_ids), 10) poly_setups_prime =",
"open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def",
"= smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters), 0) self.assertEqual(len(smaller_ids) + len(bigger_ids), len(all_ids)) def test_setup_in_configuration_space(self): cs = TestBase._get_libsvm_svc_config_space()",
"degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in",
"fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_in_configuration_space_param_not_present(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20,",
"openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups_filtered), 0) def test_setup_in_configuration_space_illegal_value(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3)",
"test_filter_setup_list_by_config_space_fails(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) setups =",
"openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold) poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1) smaller_ids = set(poly_setups_smaller.keys()) bigger_ids =",
"[i for i in range(1, 30)] setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) self.assertEqual(set(setups.keys()), set(setup_ids)) def",
"setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup = pickle.load(fp) setups[setup.setup_id]",
"[i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) def test_obtain_setups_by_ids_incomplete(self): setup_ids = [i for",
"setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_to_parameter_dict(self): expected_active_parameters",
"test_obtain_setups_by_ids_incomplete(self): setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7) def test_filter_setup_list_nominal(self):",
"openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid']) poly_ids = set(poly_setups.keys()) sigm_ids =",
"len(sigm_ids), 20) self.assertGreater(len(poly_ids), 10) self.assertGreater(len(sigm_ids), 10) poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly']) self.assertEqual(poly_ids, set(poly_setups_prime.keys()))",
"20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups), len(setups_filtered)) def test_filter_setup_list_by_config_space_fails(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\",",
"= 3 poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold) poly_setups_bigger",
"range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7) def test_filter_setup_list_nominal(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100)",
"with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) result = openmlcontrib.setups.setup_to_parameter_dict(setup,",
"= openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) self.assertEqual(set(setups.keys()), set(setup_ids)) def test_obtain_setups_by_ids_incomplete_raise(self): with self.assertRaises(ValueError): setup_ids = [i for",
"openml.config.server = self.test_server openml.config.apikey = '' def test_obtain_setups_by_ids(self): setup_ids = [i for i",
"pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_in_configuration_space_param_not_present(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20, 20, default_value=-3) cs",
"in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) def test_obtain_setups_by_ids_incomplete(self): setup_ids = [i for i in range(30)]",
"all flow ids are supposed to be the same with open('../data/flows/%d.pkl' % setup.flow_id,",
"self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc threshold = 3",
"= ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'):",
"= pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups), len(setups_filtered)) def test_filter_setup_list_by_config_space_fails(self):",
"def test_filter_setup_list_by_config_space(self): cs = TestBase._get_libsvm_svc_config_space() setups = {} for setup_file in os.listdir('../data/setups'): with",
"= pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_filter_setup_list_by_config_space(self): cs = TestBase._get_libsvm_svc_config_space() setups = {}",
"openmlcontrib import os import pickle from openmlcontrib.testing import TestBase class TestSetupFunctions(TestBase): def setUp(self):",
"+ len(bigger_ids), len(all_ids)) def test_setup_in_configuration_space(self): cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with",
"in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup = pickle.load(fp) setups[setup.setup_id] =",
"openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) def test_obtain_setups_by_ids_incomplete(self): setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False,",
"= openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1) smaller_ids = set(poly_setups_smaller.keys()) bigger_ids = set(poly_setups_bigger.keys()) all_ids = set(poly_setups.keys())",
"setUp(self): self.live_server = \"https://www.openml.org/api/v1/xml/\" self.test_server = \"https://test.openml.org/api/v1/xml/\" openml.config.server = self.test_server openml.config.apikey = ''",
"= set(sigm_setups.keys()) inters = poly_ids.intersection(sigm_ids) self.assertEqual(len(inters), 0) self.assertGreater(len(poly_ids) + len(sigm_ids), 20) self.assertGreater(len(poly_ids), 10)",
"for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup = pickle.load(fp)",
"as fp: setup = pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow",
"= pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_in_configuration_space_param_not_present(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20, 20, default_value=-3)",
"setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_filter_setup_list_by_config_space(self): cs",
"with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered",
"20, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file),",
"= openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold) poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1) smaller_ids = set(poly_setups_smaller.keys()) bigger_ids",
"as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_to_parameter_dict(self): expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters()",
"flow, cs) self.assertEqual(len(setups), len(setups_filtered)) def test_filter_setup_list_by_config_space_fails(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs",
"test_filter_setup_list_nominal(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc",
"len(setups_filtered)) def test_filter_setup_list_by_config_space_fails(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree])",
"setup_file), 'rb') as fp: setup = pickle.load(fp) setups[setup.setup_id] = setup # all flow",
"'rb') as fp: setup = pickle.load(fp) setups[setup.setup_id] = setup # all flow ids",
"'degree', min=threshold+1) smaller_ids = set(poly_setups_smaller.keys()) bigger_ids = set(poly_setups_bigger.keys()) all_ids = set(poly_setups.keys()) inters =",
"import ConfigSpace import openml import openmlcontrib import os import pickle from openmlcontrib.testing import",
"require_all=False, limit=7) def test_filter_setup_list_nominal(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline",
"allowed_values=['sigmoid']) poly_ids = set(poly_setups.keys()) sigm_ids = set(sigm_setups.keys()) inters = poly_ids.intersection(sigm_ids) self.assertEqual(len(inters), 0) self.assertGreater(len(poly_ids)",
"def setUp(self): self.live_server = \"https://www.openml.org/api/v1/xml/\" self.test_server = \"https://test.openml.org/api/v1/xml/\" openml.config.server = self.test_server openml.config.apikey =",
"setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) def test_obtain_setups_by_ids_incomplete(self): setup_ids =",
"= set(poly_setups.keys()) sigm_ids = set(sigm_setups.keys()) inters = poly_ids.intersection(sigm_ids) self.assertEqual(len(inters), 0) self.assertGreater(len(poly_ids) + len(sigm_ids),",
"-5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) setups = {} for setup_file in",
"pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow,",
"def test_setup_to_parameter_dict(self): expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters() cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with",
"= pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) result",
"self.assertGreater(len(poly_ids), 10) self.assertGreater(len(sigm_ids), 10) poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly']) self.assertEqual(poly_ids, set(poly_setups_prime.keys())) def test_filter_setup_list_nominal_numeric(self):",
"import openml import openmlcontrib import os import pickle from openmlcontrib.testing import TestBase class",
"degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) setups = {}",
"default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb')",
"setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) self.assertEqual(set(setups.keys()), set(setup_ids)) def test_obtain_setups_by_ids_incomplete_raise(self): with self.assertRaises(ValueError): setup_ids = [i",
"openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7) def test_filter_setup_list_nominal(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) #",
"TestSetupFunctions(TestBase): def setUp(self): self.live_server = \"https://www.openml.org/api/v1/xml/\" self.test_server = \"https://test.openml.org/api/v1/xml/\" openml.config.server = self.test_server openml.config.apikey",
"pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_to_parameter_dict(self): expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters() cs = TestBase._get_libsvm_svc_config_space() for",
"flow, cs) self.assertEqual(len(setups_filtered), 0) def test_setup_in_configuration_space_illegal_value(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs",
"smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters), 0) self.assertEqual(len(smaller_ids) + len(bigger_ids), len(all_ids)) def test_setup_in_configuration_space(self): cs = TestBase._get_libsvm_svc_config_space() for",
"setup_file), 'rb') as fp: setup = pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as",
"openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc poly_setups",
"\"https://www.openml.org/api/v1/xml/\" self.test_server = \"https://test.openml.org/api/v1/xml/\" openml.config.server = self.test_server openml.config.apikey = '' def test_obtain_setups_by_ids(self): setup_ids",
"= openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold) poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree',",
"self.live_server = \"https://www.openml.org/api/v1/xml/\" self.test_server = \"https://test.openml.org/api/v1/xml/\" openml.config.server = self.test_server openml.config.apikey = '' def",
"openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) self.assertEqual(set(setups.keys()), set(setup_ids)) def test_obtain_setups_by_ids_incomplete_raise(self): with self.assertRaises(ValueError): setup_ids = [i for i",
"open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup = pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb')",
"# pipeline with libsvm svc threshold = 3 poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly'])",
"the same with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertEqual(len(setups),",
"= TestBase._libsvm_expected_active_hyperparameters() cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb')",
"openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc threshold = 3 poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup,",
"poly_ids = set(poly_setups.keys()) sigm_ids = set(sigm_setups.keys()) inters = poly_ids.intersection(sigm_ids) self.assertEqual(len(inters), 0) self.assertGreater(len(poly_ids) +",
"flow, cs)) def test_filter_setup_list_by_config_space(self): cs = TestBase._get_libsvm_svc_config_space() setups = {} for setup_file in",
"-20, 20, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups',",
"cs)) def test_filter_setup_list_by_config_space(self): cs = TestBase._get_libsvm_svc_config_space() setups = {} for setup_file in os.listdir('../data/setups'):",
"poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid']) poly_ids = set(poly_setups.keys())",
"for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) def test_obtain_setups_by_ids_incomplete(self): setup_ids = [i for i",
"fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_to_parameter_dict(self): expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters() cs",
"supposed to be the same with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow",
"setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow,",
"pipeline with libsvm svc poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel',",
"= self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc threshold =",
"openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly']) self.assertEqual(poly_ids, set(poly_setups_prime.keys())) def test_filter_setup_list_nominal_numeric(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707,",
"setups[setup.setup_id] = setup # all flow ids are supposed to be the same",
"openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups), len(setups_filtered)) def test_filter_setup_list_by_config_space_fails(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3)",
"self.assertEqual(len(smaller_ids) + len(bigger_ids), len(all_ids)) def test_setup_in_configuration_space(self): cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'):",
"limit=7) def test_filter_setup_list_nominal(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with",
"10) self.assertGreater(len(sigm_ids), 10) poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly']) self.assertEqual(poly_ids, set(poly_setups_prime.keys())) def test_filter_setup_list_nominal_numeric(self): openml.config.server",
"open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) result = openmlcontrib.setups.setup_to_parameter_dict(setup, flow,",
"= setup # all flow ids are supposed to be the same with",
"are supposed to be the same with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp:",
"openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1) smaller_ids = set(poly_setups_smaller.keys()) bigger_ids = set(poly_setups_bigger.keys()) all_ids = set(poly_setups.keys()) inters",
"open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def",
"= {} for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup",
"openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold) poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1)",
"= self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc poly_setups =",
"max=threshold) poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1) smaller_ids = set(poly_setups_smaller.keys()) bigger_ids = set(poly_setups_bigger.keys()) all_ids",
"cs.add_hyperparameters([degree]) setups = {} for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as",
"= openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups), len(setups_filtered)) def test_filter_setup_list_by_config_space_fails(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1,",
"= [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) def test_obtain_setups_by_ids_incomplete(self): setup_ids = [i",
"{} for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup =",
"self.assertEqual(len(setups_filtered), 0) def test_setup_in_configuration_space_illegal_value(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace()",
"test_filter_setup_list_by_config_space(self): cs = TestBase._get_libsvm_svc_config_space() setups = {} for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups',",
"svc threshold = 3 poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree',",
"set(poly_setups_bigger.keys()) all_ids = set(poly_setups.keys()) inters = smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters), 0) self.assertEqual(len(smaller_ids) + len(bigger_ids), len(all_ids))",
"% setup.flow_id, 'rb') as fp: flow = pickle.load(fp) result = openmlcontrib.setups.setup_to_parameter_dict(setup, flow, True,",
"= ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) setups = {} for",
"test_obtain_setups_by_ids_incomplete_raise(self): with self.assertRaises(ValueError): setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) def",
"openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc threshold",
"10) poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly']) self.assertEqual(poly_ids, set(poly_setups_prime.keys())) def test_filter_setup_list_nominal_numeric(self): openml.config.server = self.live_server",
"openml import openmlcontrib import os import pickle from openmlcontrib.testing import TestBase class TestSetupFunctions(TestBase):",
"= pickle.load(fp) setups[setup.setup_id] = setup # all flow ids are supposed to be",
"allowed_values=['poly']) self.assertEqual(poly_ids, set(poly_setups_prime.keys())) def test_filter_setup_list_nominal_numeric(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) #",
"poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold) poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups,",
"flow ids are supposed to be the same with open('../data/flows/%d.pkl' % setup.flow_id, 'rb')",
"'rb') as fp: flow = pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs)",
"as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_in_configuration_space_param_not_present(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"test123\",",
"sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid']) poly_ids = set(poly_setups.keys()) sigm_ids = set(sigm_setups.keys()) inters =",
"fp: flow = pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups), len(setups_filtered))",
"'kernel', allowed_values=['poly']) sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid']) poly_ids = set(poly_setups.keys()) sigm_ids = set(sigm_setups.keys())",
"range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) def test_obtain_setups_by_ids_incomplete(self): setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids,",
"pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_filter_setup_list_by_config_space(self): cs = TestBase._get_libsvm_svc_config_space() setups = {} for",
"self.assertEqual(len(inters), 0) self.assertEqual(len(smaller_ids) + len(bigger_ids), len(all_ids)) def test_setup_in_configuration_space(self): cs = TestBase._get_libsvm_svc_config_space() for setup_file",
"# pipeline with libsvm svc poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup,",
"svc poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid']) poly_ids =",
"def test_setup_in_configuration_space_param_not_present(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20, 20, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for",
"ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20, 20, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'): with",
"set(poly_setups_prime.keys())) def test_filter_setup_list_nominal_numeric(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with",
"in range(1, 30)] setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) self.assertEqual(set(setups.keys()), set(setup_ids)) def test_obtain_setups_by_ids_incomplete_raise(self): with self.assertRaises(ValueError):",
"<filename>tests/test_setups/test_setups_functions.py import ConfigSpace import openml import openmlcontrib import os import pickle from openmlcontrib.testing",
"setup = pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp)",
"limit=7) def test_obtain_setups_by_ids_incomplete(self): setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7)",
"os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup = pickle.load(fp) with open('../data/flows/%d.pkl' %",
"flow = pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups_filtered), 0) def",
"-1, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file),",
"self.assertEqual(poly_ids, set(poly_setups_prime.keys())) def test_filter_setup_list_nominal_numeric(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline",
"def test_obtain_setups_by_ids(self): setup_ids = [i for i in range(1, 30)] setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids,",
"% setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups,",
"be the same with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp)",
"libsvm svc poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid']) poly_ids",
"fp: setup = pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow =",
"smaller_ids = set(poly_setups_smaller.keys()) bigger_ids = set(poly_setups_bigger.keys()) all_ids = set(poly_setups.keys()) inters = smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters),",
"pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow,",
"as fp: flow = pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_filter_setup_list_by_config_space(self): cs = TestBase._get_libsvm_svc_config_space()",
"size=100) # pipeline with libsvm svc poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly']) sigm_setups =",
"flow = pickle.load(fp) self.assertEqual(len(setups), 20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups), len(setups_filtered)) def",
"cs) self.assertEqual(len(setups_filtered), 0) def test_setup_in_configuration_space_illegal_value(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5, -1, default_value=-3) cs =",
"cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp:",
"'kernel', allowed_values=['poly']) self.assertEqual(poly_ids, set(poly_setups_prime.keys())) def test_filter_setup_list_nominal_numeric(self): openml.config.server = self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100)",
"cs)) def test_setup_to_parameter_dict(self): expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters() cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'):",
"poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold) poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1) smaller_ids = set(poly_setups_smaller.keys())",
"open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup = pickle.load(fp) setups[setup.setup_id] = setup # all",
"cs)) def test_setup_in_configuration_space_param_not_present(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"test123\", -20, 20, default_value=-3) cs = ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree])",
"= poly_ids.intersection(sigm_ids) self.assertEqual(len(inters), 0) self.assertGreater(len(poly_ids) + len(sigm_ids), 20) self.assertGreater(len(poly_ids), 10) self.assertGreater(len(sigm_ids), 10) poly_setups_prime",
"self.assertGreater(len(poly_ids) + len(sigm_ids), 20) self.assertGreater(len(poly_ids), 10) self.assertGreater(len(sigm_ids), 10) poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly'])",
"20) setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups_filtered), 0) def test_setup_in_configuration_space_illegal_value(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\",",
"allowed_values=['poly']) poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold) poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1) smaller_ids =",
"size=100) # pipeline with libsvm svc threshold = 3 poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel',",
"import os import pickle from openmlcontrib.testing import TestBase class TestSetupFunctions(TestBase): def setUp(self): self.live_server",
"limit=7) self.assertEqual(set(setups.keys()), set(setup_ids)) def test_obtain_setups_by_ids_incomplete_raise(self): with self.assertRaises(ValueError): setup_ids = [i for i in",
"setup = pickle.load(fp) setups[setup.setup_id] = setup # all flow ids are supposed to",
"= set(poly_setups.keys()) inters = smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters), 0) self.assertEqual(len(smaller_ids) + len(bigger_ids), len(all_ids)) def test_setup_in_configuration_space(self):",
"ConfigSpace.ConfigurationSpace() cs.add_hyperparameters([degree]) setups = {} for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb')",
"range(1, 30)] setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) self.assertEqual(set(setups.keys()), set(setup_ids)) def test_obtain_setups_by_ids_incomplete_raise(self): with self.assertRaises(ValueError): setup_ids",
"fp: flow = pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_filter_setup_list_by_config_space(self): cs = TestBase._get_libsvm_svc_config_space() setups",
"20) self.assertGreater(len(poly_ids), 10) self.assertGreater(len(sigm_ids), 10) poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly']) self.assertEqual(poly_ids, set(poly_setups_prime.keys())) def",
"flow = pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_filter_setup_list_by_config_space(self): cs = TestBase._get_libsvm_svc_config_space() setups =",
"cs = TestBase._get_libsvm_svc_config_space() setups = {} for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file),",
"len(all_ids)) def test_setup_in_configuration_space(self): cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file),",
"self.assertRaises(ValueError): setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) def test_obtain_setups_by_ids_incomplete(self): setup_ids",
"= openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid']) poly_ids = set(poly_setups.keys()) sigm_ids = set(sigm_setups.keys()) inters = poly_ids.intersection(sigm_ids)",
"ids are supposed to be the same with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as",
"inters = poly_ids.intersection(sigm_ids) self.assertEqual(len(inters), 0) self.assertGreater(len(poly_ids) + len(sigm_ids), 20) self.assertGreater(len(poly_ids), 10) self.assertGreater(len(sigm_ids), 10)",
"openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid']) poly_ids = set(poly_setups.keys()) sigm_ids = set(sigm_setups.keys()) inters = poly_ids.intersection(sigm_ids) self.assertEqual(len(inters),",
"'rb') as fp: flow = pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_filter_setup_list_by_config_space(self): cs =",
"import openmlcontrib import os import pickle from openmlcontrib.testing import TestBase class TestSetupFunctions(TestBase): def",
"= set(poly_setups_bigger.keys()) all_ids = set(poly_setups.keys()) inters = smaller_ids.intersection(bigger_ids) self.assertEqual(len(inters), 0) self.assertEqual(len(smaller_ids) + len(bigger_ids),",
"self.live_server setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup,",
"self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_filter_setup_list_by_config_space(self): cs = TestBase._get_libsvm_svc_config_space() setups = {} for setup_file",
"setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs) self.assertEqual(len(setups_filtered), 0) def test_setup_in_configuration_space_illegal_value(self): degree = ConfigSpace.UniformIntegerHyperparameter(\"degree\", -5,",
"self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_setup_to_parameter_dict(self): expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters() cs = TestBase._get_libsvm_svc_config_space() for setup_file",
"% setup.flow_id, 'rb') as fp: flow = pickle.load(fp) self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs)) def test_filter_setup_list_by_config_space(self):",
"TestBase class TestSetupFunctions(TestBase): def setUp(self): self.live_server = \"https://www.openml.org/api/v1/xml/\" self.test_server = \"https://test.openml.org/api/v1/xml/\" openml.config.server =",
"def test_setup_in_configuration_space(self): cs = TestBase._get_libsvm_svc_config_space() for setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb')",
"min=threshold+1) smaller_ids = set(poly_setups_smaller.keys()) bigger_ids = set(poly_setups_bigger.keys()) all_ids = set(poly_setups.keys()) inters = smaller_ids.intersection(bigger_ids)",
"allowed_values=['poly']) sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid']) poly_ids = set(poly_setups.keys()) sigm_ids = set(sigm_setups.keys()) inters",
"with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup = pickle.load(fp) with open('../data/flows/%d.pkl' % setup.flow_id,",
"i in range(1, 30)] setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7) self.assertEqual(set(setups.keys()), set(setup_ids)) def test_obtain_setups_by_ids_incomplete_raise(self): with",
"import TestBase class TestSetupFunctions(TestBase): def setUp(self): self.live_server = \"https://www.openml.org/api/v1/xml/\" self.test_server = \"https://test.openml.org/api/v1/xml/\" openml.config.server",
"to be the same with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp: flow =",
"set(setup_ids)) def test_obtain_setups_by_ids_incomplete_raise(self): with self.assertRaises(ValueError): setup_ids = [i for i in range(30)] openmlcontrib.setups.obtain_setups_by_ids(setup_ids,",
"setup_file in os.listdir('../data/setups'): with open(os.path.join('../data/setups', setup_file), 'rb') as fp: setup = pickle.load(fp) with"
] |
[
"type(station) is tuple assert type(station[1]) is int i=0 for i in range(0,len(test)-1): assert",
"test_rivers_by_station_number(): \"\"\"Tests to check that the outputs from funtion rivers_by_station_number are as expected\"\"\"",
"as expected\"\"\" stations = build_station_list() test = rivers_by_station_number(stations, 9) for station in test:",
"tuple assert type(station[1]) is int i=0 for i in range(0,len(test)-1): assert test[i][1] >=",
"import build_station_list from floodsystem.geo import rivers_by_station_number def test_rivers_by_station_number(): \"\"\"Tests to check that the",
"rivers_by_station_number def test_rivers_by_station_number(): \"\"\"Tests to check that the outputs from funtion rivers_by_station_number are",
"to check that the outputs from funtion rivers_by_station_number are as expected\"\"\" stations =",
"expected\"\"\" stations = build_station_list() test = rivers_by_station_number(stations, 9) for station in test: assert",
"test: assert type(station) is tuple assert type(station[1]) is int i=0 for i in",
"outputs from funtion rivers_by_station_number are as expected\"\"\" stations = build_station_list() test = rivers_by_station_number(stations,",
"\"\"\"Tests to check that the outputs from funtion rivers_by_station_number are as expected\"\"\" stations",
"build_station_list() test = rivers_by_station_number(stations, 9) for station in test: assert type(station) is tuple",
"rivers_by_station_number are as expected\"\"\" stations = build_station_list() test = rivers_by_station_number(stations, 9) for station",
"rivers_by_station_number(stations, 9) for station in test: assert type(station) is tuple assert type(station[1]) is",
"= build_station_list() test = rivers_by_station_number(stations, 9) for station in test: assert type(station) is",
"check that the outputs from funtion rivers_by_station_number are as expected\"\"\" stations = build_station_list()",
"funtion rivers_by_station_number are as expected\"\"\" stations = build_station_list() test = rivers_by_station_number(stations, 9) for",
"import rivers_by_station_number def test_rivers_by_station_number(): \"\"\"Tests to check that the outputs from funtion rivers_by_station_number",
"the outputs from funtion rivers_by_station_number are as expected\"\"\" stations = build_station_list() test =",
"assert type(station[1]) is int i=0 for i in range(0,len(test)-1): assert test[i][1] >= test[i+1][1]",
"is tuple assert type(station[1]) is int i=0 for i in range(0,len(test)-1): assert test[i][1]",
"floodsystem.geo import rivers_by_station_number def test_rivers_by_station_number(): \"\"\"Tests to check that the outputs from funtion",
"= rivers_by_station_number(stations, 9) for station in test: assert type(station) is tuple assert type(station[1])",
"stations = build_station_list() test = rivers_by_station_number(stations, 9) for station in test: assert type(station)",
"build_station_list from floodsystem.geo import rivers_by_station_number def test_rivers_by_station_number(): \"\"\"Tests to check that the outputs",
"9) for station in test: assert type(station) is tuple assert type(station[1]) is int",
"in test: assert type(station) is tuple assert type(station[1]) is int i=0 for i",
"assert type(station) is tuple assert type(station[1]) is int i=0 for i in range(0,len(test)-1):",
"from floodsystem.geo import rivers_by_station_number def test_rivers_by_station_number(): \"\"\"Tests to check that the outputs from",
"from funtion rivers_by_station_number are as expected\"\"\" stations = build_station_list() test = rivers_by_station_number(stations, 9)",
"floodsystem.geo import build_station_list from floodsystem.geo import rivers_by_station_number def test_rivers_by_station_number(): \"\"\"Tests to check that",
"from floodsystem.geo import build_station_list from floodsystem.geo import rivers_by_station_number def test_rivers_by_station_number(): \"\"\"Tests to check",
"for station in test: assert type(station) is tuple assert type(station[1]) is int i=0",
"def test_rivers_by_station_number(): \"\"\"Tests to check that the outputs from funtion rivers_by_station_number are as",
"are as expected\"\"\" stations = build_station_list() test = rivers_by_station_number(stations, 9) for station in",
"test = rivers_by_station_number(stations, 9) for station in test: assert type(station) is tuple assert",
"station in test: assert type(station) is tuple assert type(station[1]) is int i=0 for",
"that the outputs from funtion rivers_by_station_number are as expected\"\"\" stations = build_station_list() test"
] |
[] |
[
"for el in string_to_check: deq.addTail(el) front=deq.removeFront() end=deq.removeTail() while front == end and front",
"is not None and end is not None: if deq.size() == 1: return",
"is_palindrome(string_to_check): string_to_check=string_to_check.strip() if not string_to_check: raise Exception(\"The string is empty\") deq=Deque() for el",
"from src.Deque.deque_scratch import Deque def is_palindrome(string_to_check): string_to_check=string_to_check.strip() if not string_to_check: raise Exception(\"The string",
"end and front is not None and end is not None: if deq.size()",
"is not None: if deq.size() == 1: return True front=deq.removeFront() end=deq.removeTail() if deq.size()",
"if not string_to_check: raise Exception(\"The string is empty\") deq=Deque() for el in string_to_check:",
"Deque def is_palindrome(string_to_check): string_to_check=string_to_check.strip() if not string_to_check: raise Exception(\"The string is empty\") deq=Deque()",
"raise Exception(\"The string is empty\") deq=Deque() for el in string_to_check: deq.addTail(el) front=deq.removeFront() end=deq.removeTail()",
"front is not None and end is not None: if deq.size() == 1:",
"Exception(\"The string is empty\") deq=Deque() for el in string_to_check: deq.addTail(el) front=deq.removeFront() end=deq.removeTail() while",
"import Deque def is_palindrome(string_to_check): string_to_check=string_to_check.strip() if not string_to_check: raise Exception(\"The string is empty\")",
"not None: if deq.size() == 1: return True front=deq.removeFront() end=deq.removeTail() if deq.size() ==",
"and front is not None and end is not None: if deq.size() ==",
"return True front=deq.removeFront() end=deq.removeTail() if deq.size() == 0: return True else: return False",
"empty\") deq=Deque() for el in string_to_check: deq.addTail(el) front=deq.removeFront() end=deq.removeTail() while front == end",
"while front == end and front is not None and end is not",
"1: return True front=deq.removeFront() end=deq.removeTail() if deq.size() == 0: return True else: return",
"and end is not None: if deq.size() == 1: return True front=deq.removeFront() end=deq.removeTail()",
"None: if deq.size() == 1: return True front=deq.removeFront() end=deq.removeTail() if deq.size() == 0:",
"front=deq.removeFront() end=deq.removeTail() while front == end and front is not None and end",
"src.Deque.deque_scratch import Deque def is_palindrome(string_to_check): string_to_check=string_to_check.strip() if not string_to_check: raise Exception(\"The string is",
"deq=Deque() for el in string_to_check: deq.addTail(el) front=deq.removeFront() end=deq.removeTail() while front == end and",
"end is not None: if deq.size() == 1: return True front=deq.removeFront() end=deq.removeTail() if",
"def is_palindrome(string_to_check): string_to_check=string_to_check.strip() if not string_to_check: raise Exception(\"The string is empty\") deq=Deque() for",
"None and end is not None: if deq.size() == 1: return True front=deq.removeFront()",
"is empty\") deq=Deque() for el in string_to_check: deq.addTail(el) front=deq.removeFront() end=deq.removeTail() while front ==",
"== 1: return True front=deq.removeFront() end=deq.removeTail() if deq.size() == 0: return True else:",
"front == end and front is not None and end is not None:",
"deq.size() == 1: return True front=deq.removeFront() end=deq.removeTail() if deq.size() == 0: return True",
"not None and end is not None: if deq.size() == 1: return True",
"string is empty\") deq=Deque() for el in string_to_check: deq.addTail(el) front=deq.removeFront() end=deq.removeTail() while front",
"deq.addTail(el) front=deq.removeFront() end=deq.removeTail() while front == end and front is not None and",
"string_to_check=string_to_check.strip() if not string_to_check: raise Exception(\"The string is empty\") deq=Deque() for el in",
"el in string_to_check: deq.addTail(el) front=deq.removeFront() end=deq.removeTail() while front == end and front is",
"end=deq.removeTail() while front == end and front is not None and end is",
"string_to_check: raise Exception(\"The string is empty\") deq=Deque() for el in string_to_check: deq.addTail(el) front=deq.removeFront()",
"in string_to_check: deq.addTail(el) front=deq.removeFront() end=deq.removeTail() while front == end and front is not",
"== end and front is not None and end is not None: if",
"if deq.size() == 1: return True front=deq.removeFront() end=deq.removeTail() if deq.size() == 0: return",
"string_to_check: deq.addTail(el) front=deq.removeFront() end=deq.removeTail() while front == end and front is not None",
"not string_to_check: raise Exception(\"The string is empty\") deq=Deque() for el in string_to_check: deq.addTail(el)"
] |
[] |
[
"1 arr[i] += 1 while i > 0 and arr[i] > 9: arr[i]",
"> 9: arr[i] = 0 arr[i - 1] += 1 i -= 1",
"N - 1 arr[i] += 1 while i > 0 and arr[i] >",
"for python3 class Solution: def increment(self, arr, N): # code here i =",
"ptr = ob.increment(arr,N) for i in ptr: print(i,end=\" \") print() # } Driver",
"Template for Python 3 if __name__ == '__main__': t = int (input ())",
"ob = Solution() ptr = ob.increment(arr,N) for i in ptr: print(i,end=\" \") print()",
"1) return arr #{ # Driver Code Starts #Initial Template for Python 3",
"= int (input ()) for _ in range (t): N=int(input()) arr=list(map(int,input().split())) ob =",
"increment(self, arr, N): # code here i = N - 1 arr[i] +=",
"9: arr[0] = 0 arr.insert(0, 1) return arr #{ # Driver Code Starts",
"= ob.increment(arr,N) for i in ptr: print(i,end=\" \") print() # } Driver Code",
"function Template for python3 class Solution: def increment(self, arr, N): # code here",
"arr[i] = 0 arr[i - 1] += 1 i -= 1 if arr[0]",
"and arr[i] > 9: arr[i] = 0 arr[i - 1] += 1 i",
"(input ()) for _ in range (t): N=int(input()) arr=list(map(int,input().split())) ob = Solution() ptr",
"= 0 arr[i - 1] += 1 i -= 1 if arr[0] >",
"- 1] += 1 i -= 1 if arr[0] > 9: arr[0] =",
"range (t): N=int(input()) arr=list(map(int,input().split())) ob = Solution() ptr = ob.increment(arr,N) for i in",
"- 1 arr[i] += 1 while i > 0 and arr[i] > 9:",
"0 arr[i - 1] += 1 i -= 1 if arr[0] > 9:",
"arr #{ # Driver Code Starts #Initial Template for Python 3 if __name__",
"0 and arr[i] > 9: arr[i] = 0 arr[i - 1] += 1",
"#Initial Template for Python 3 if __name__ == '__main__': t = int (input",
"i = N - 1 arr[i] += 1 while i > 0 and",
"arr.insert(0, 1) return arr #{ # Driver Code Starts #Initial Template for Python",
"arr[i] += 1 while i > 0 and arr[i] > 9: arr[i] =",
"code here i = N - 1 arr[i] += 1 while i >",
"arr[i] > 9: arr[i] = 0 arr[i - 1] += 1 i -=",
"for Python 3 if __name__ == '__main__': t = int (input ()) for",
"1 while i > 0 and arr[i] > 9: arr[i] = 0 arr[i",
"i > 0 and arr[i] > 9: arr[i] = 0 arr[i - 1]",
"> 0 and arr[i] > 9: arr[i] = 0 arr[i - 1] +=",
"= 0 arr.insert(0, 1) return arr #{ # Driver Code Starts #Initial Template",
"9: arr[i] = 0 arr[i - 1] += 1 i -= 1 if",
"-= 1 if arr[0] > 9: arr[0] = 0 arr.insert(0, 1) return arr",
"= Solution() ptr = ob.increment(arr,N) for i in ptr: print(i,end=\" \") print() #",
"1 if arr[0] > 9: arr[0] = 0 arr.insert(0, 1) return arr #{",
"arr, N): # code here i = N - 1 arr[i] += 1",
"if arr[0] > 9: arr[0] = 0 arr.insert(0, 1) return arr #{ #",
"> 9: arr[0] = 0 arr.insert(0, 1) return arr #{ # Driver Code",
"(t): N=int(input()) arr=list(map(int,input().split())) ob = Solution() ptr = ob.increment(arr,N) for i in ptr:",
"Python 3 if __name__ == '__main__': t = int (input ()) for _",
"= N - 1 arr[i] += 1 while i > 0 and arr[i]",
"ob.increment(arr,N) for i in ptr: print(i,end=\" \") print() # } Driver Code Ends",
"1 i -= 1 if arr[0] > 9: arr[0] = 0 arr.insert(0, 1)",
"Starts #Initial Template for Python 3 if __name__ == '__main__': t = int",
"arr[0] = 0 arr.insert(0, 1) return arr #{ # Driver Code Starts #Initial",
"Driver Code Starts #Initial Template for Python 3 if __name__ == '__main__': t",
"_ in range (t): N=int(input()) arr=list(map(int,input().split())) ob = Solution() ptr = ob.increment(arr,N) for",
"class Solution: def increment(self, arr, N): # code here i = N -",
"# code here i = N - 1 arr[i] += 1 while i",
"for _ in range (t): N=int(input()) arr=list(map(int,input().split())) ob = Solution() ptr = ob.increment(arr,N)",
"arr[i - 1] += 1 i -= 1 if arr[0] > 9: arr[0]",
"Solution() ptr = ob.increment(arr,N) for i in ptr: print(i,end=\" \") print() # }",
"#{ # Driver Code Starts #Initial Template for Python 3 if __name__ ==",
"# Driver Code Starts #Initial Template for Python 3 if __name__ == '__main__':",
"in range (t): N=int(input()) arr=list(map(int,input().split())) ob = Solution() ptr = ob.increment(arr,N) for i",
"i -= 1 if arr[0] > 9: arr[0] = 0 arr.insert(0, 1) return",
"if __name__ == '__main__': t = int (input ()) for _ in range",
"0 arr.insert(0, 1) return arr #{ # Driver Code Starts #Initial Template for",
"return arr #{ # Driver Code Starts #Initial Template for Python 3 if",
"__name__ == '__main__': t = int (input ()) for _ in range (t):",
"Template for python3 class Solution: def increment(self, arr, N): # code here i",
"python3 class Solution: def increment(self, arr, N): # code here i = N",
"N): # code here i = N - 1 arr[i] += 1 while",
"+= 1 i -= 1 if arr[0] > 9: arr[0] = 0 arr.insert(0,",
"t = int (input ()) for _ in range (t): N=int(input()) arr=list(map(int,input().split())) ob",
"arr[0] > 9: arr[0] = 0 arr.insert(0, 1) return arr #{ # Driver",
"== '__main__': t = int (input ()) for _ in range (t): N=int(input())",
"def increment(self, arr, N): # code here i = N - 1 arr[i]",
"1] += 1 i -= 1 if arr[0] > 9: arr[0] = 0",
"+= 1 while i > 0 and arr[i] > 9: arr[i] = 0",
"int (input ()) for _ in range (t): N=int(input()) arr=list(map(int,input().split())) ob = Solution()",
"Code Starts #Initial Template for Python 3 if __name__ == '__main__': t =",
"'__main__': t = int (input ()) for _ in range (t): N=int(input()) arr=list(map(int,input().split()))",
"arr=list(map(int,input().split())) ob = Solution() ptr = ob.increment(arr,N) for i in ptr: print(i,end=\" \")",
"3 if __name__ == '__main__': t = int (input ()) for _ in",
"N=int(input()) arr=list(map(int,input().split())) ob = Solution() ptr = ob.increment(arr,N) for i in ptr: print(i,end=\"",
"#User function Template for python3 class Solution: def increment(self, arr, N): # code",
"while i > 0 and arr[i] > 9: arr[i] = 0 arr[i -",
"Solution: def increment(self, arr, N): # code here i = N - 1",
"()) for _ in range (t): N=int(input()) arr=list(map(int,input().split())) ob = Solution() ptr =",
"here i = N - 1 arr[i] += 1 while i > 0",
"<filename>Geeks-For-Geeks/Practice/Array/Plus-One.py<gh_stars>0 #User function Template for python3 class Solution: def increment(self, arr, N): #"
] |
[
"\"\"\" * * Author: <NAME>(coderemite) * Email: <EMAIL> * \"\"\" r=0 for x",
"<NAME>(coderemite) * Email: <EMAIL> * \"\"\" r=0 for x in map(int,[*open(0)][1].split()): r=(r+x-1)%2 print([2,1][r])",
"* * Author: <NAME>(coderemite) * Email: <EMAIL> * \"\"\" r=0 for x in",
"* Author: <NAME>(coderemite) * Email: <EMAIL> * \"\"\" r=0 for x in map(int,[*open(0)][1].split()):",
"Author: <NAME>(coderemite) * Email: <EMAIL> * \"\"\" r=0 for x in map(int,[*open(0)][1].split()): r=(r+x-1)%2",
"<reponame>juarezpaulino/coderemite \"\"\" * * Author: <NAME>(coderemite) * Email: <EMAIL> * \"\"\" r=0 for"
] |
[
"key=lambda x: path.getmtime(x)) checkpoint = checkpoints[-1] if pattern is None: pattern = lambda",
"None else predicate(f))] def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None): if predicate is None: predicate =",
"x)), 'file':lambda x: path.isfile(path.join(dir_path, x)) }[predicate] return [f if name_only else path.join(dir_path, f)",
"image = image * 0.0192 / 1000 + 0.0192 return image def get_aapm_minmax(data_dir,",
"path.join(study_dir, f) if f.endswith('.mat'): data_files.append(data_file) val_max = -float('inf') val_min = float('inf') for f",
"predicate(f))] def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None): if predicate is None: predicate = lambda x:",
"f) for f in os.listdir(dir_path) if (True if predicate is None else predicate(f))]",
"def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None): if predicate is None: predicate = lambda x: x.endswith('pth')",
"= image * 0.0192 / 1000 + 0.0192 return image def get_aapm_minmax(data_dir, splits=('test',",
"float('inf') for f in tqdm(data_files): data = sio.loadmat(f) data = np.array([data[t] for t",
"path.isdir(path.join(dir_path, x)), 'file':lambda x: path.isfile(path.join(dir_path, x)) }[predicate] return [f if name_only else path.join(dir_path,",
"/ 0.0192 * 1000 return image def convert_hu2coefficient(image): image = image * 0.0192",
"= path.join(split_dir, d) for f in os.listdir(study_dir): data_file = path.join(study_dir, f) if f.endswith('.mat'):",
"splits=('test', 'train', 'val'), tags=('dense_view', 'sparse_view')): data_files = [] for s in splits: split_dir",
"os.listdir(dir_path) if (True if predicate is None else predicate(f))] def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None):",
"for f in tqdm(data_files): data = sio.loadmat(f) data = np.array([data[t] for t in",
"study_dir = path.join(split_dir, d) for f in os.listdir(study_dir): data_file = path.join(study_dir, f) if",
"as np from tqdm import tqdm from skimage.measure import compare_ssim, compare_psnr def read_dir(dir_path,",
"os import os.path as path import scipy.io as sio import numpy as np",
"os.listdir(study_dir): data_file = path.join(study_dir, f) if f.endswith('.mat'): data_files.append(data_file) val_max = -float('inf') val_min =",
"None: return {'psnr': [psnr], 'ssim': [ssim]} else: metrics['psnr'].append(psnr) metrics['ssim'].append(ssim) return metrics def convert_coefficient2hu(image):",
"'file'}: predicate = { 'dir': lambda x: path.isdir(path.join(dir_path, x)), 'file':lambda x: path.isfile(path.join(dir_path, x))",
"d in os.listdir(split_dir): study_dir = path.join(split_dir, d) for f in os.listdir(study_dir): data_file =",
"in tags]) if data.max() > val_max: val_max = data.max() if data.min() < val_min:",
"compute_metrics(lq_image, hq_image, metrics=None): psnr = compare_psnr(lq_image, hq_image, hq_image.max()) ssim = compare_ssim(lq_image, hq_image, data_range=hq_image.max())",
"f) if f.endswith('.mat'): data_files.append(data_file) val_max = -float('inf') val_min = float('inf') for f in",
"psnr = compare_psnr(lq_image, hq_image, hq_image.max()) ssim = compare_ssim(lq_image, hq_image, data_range=hq_image.max()) if metrics is",
"{'psnr': [psnr], 'ssim': [ssim]} else: metrics['psnr'].append(psnr) metrics['ssim'].append(ssim) return metrics def convert_coefficient2hu(image): image =",
"read_dir(checkpoint_dir, predicate) if len(checkpoints) == 0: return None checkpoints = sorted(checkpoints, key=lambda x:",
"None checkpoints = sorted(checkpoints, key=lambda x: path.getmtime(x)) checkpoint = checkpoints[-1] if pattern is",
"if pattern is None: pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0]) return checkpoint, pattern(checkpoint) def",
"len(checkpoints) == 0: return None checkpoints = sorted(checkpoints, key=lambda x: path.getmtime(x)) checkpoint =",
"1000 return image def convert_hu2coefficient(image): image = image * 0.0192 / 1000 +",
"= path.join(data_dir, s) for d in os.listdir(split_dir): study_dir = path.join(split_dir, d) for f",
"}[predicate] return [f if name_only else path.join(dir_path, f) for f in os.listdir(dir_path) if",
"s) for d in os.listdir(split_dir): study_dir = path.join(split_dir, d) for f in os.listdir(study_dir):",
"for f in os.listdir(study_dir): data_file = path.join(study_dir, f) if f.endswith('.mat'): data_files.append(data_file) val_max =",
"path.getmtime(x)) checkpoint = checkpoints[-1] if pattern is None: pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0])",
"'get_aapm_minmax', 'convert_coefficient2hu', 'convert_hu2coefficient'] import os import os.path as path import scipy.io as sio",
"path.isfile(path.join(dir_path, x)) }[predicate] return [f if name_only else path.join(dir_path, f) for f in",
"checkpoint, pattern(checkpoint) def compute_metrics(lq_image, hq_image, metrics=None): psnr = compare_psnr(lq_image, hq_image, hq_image.max()) ssim =",
"predicate=None, pattern=None): if predicate is None: predicate = lambda x: x.endswith('pth') or x.endswith('pt')",
"os.path as path import scipy.io as sio import numpy as np from tqdm",
"in tqdm(data_files): data = sio.loadmat(f) data = np.array([data[t] for t in tags]) if",
"for d in os.listdir(split_dir): study_dir = path.join(split_dir, d) for f in os.listdir(study_dir): data_file",
"hq_image, metrics=None): psnr = compare_psnr(lq_image, hq_image, hq_image.max()) ssim = compare_ssim(lq_image, hq_image, data_range=hq_image.max()) if",
"predicate) if len(checkpoints) == 0: return None checkpoints = sorted(checkpoints, key=lambda x: path.getmtime(x))",
"> val_max: val_max = data.max() if data.min() < val_min: val_min = data.min() return",
"as sio import numpy as np from tqdm import tqdm from skimage.measure import",
"x: path.isdir(path.join(dir_path, x)), 'file':lambda x: path.isfile(path.join(dir_path, x)) }[predicate] return [f if name_only else",
"convert_coefficient2hu(image): image = (image - 0.0192) / 0.0192 * 1000 return image def",
"convert_hu2coefficient(image): image = image * 0.0192 / 1000 + 0.0192 return image def",
"d) for f in os.listdir(study_dir): data_file = path.join(study_dir, f) if f.endswith('.mat'): data_files.append(data_file) val_max",
"in os.listdir(study_dir): data_file = path.join(study_dir, f) if f.endswith('.mat'): data_files.append(data_file) val_max = -float('inf') val_min",
"val_max = -float('inf') val_min = float('inf') for f in tqdm(data_files): data = sio.loadmat(f)",
"metrics def convert_coefficient2hu(image): image = (image - 0.0192) / 0.0192 * 1000 return",
"checkpoints = read_dir(checkpoint_dir, predicate) if len(checkpoints) == 0: return None checkpoints = sorted(checkpoints,",
"= { 'dir': lambda x: path.isdir(path.join(dir_path, x)), 'file':lambda x: path.isfile(path.join(dir_path, x)) }[predicate] return",
"= compare_ssim(lq_image, hq_image, data_range=hq_image.max()) if metrics is None: return {'psnr': [psnr], 'ssim': [ssim]}",
"= compare_psnr(lq_image, hq_image, hq_image.max()) ssim = compare_ssim(lq_image, hq_image, data_range=hq_image.max()) if metrics is None:",
"hq_image.max()) ssim = compare_ssim(lq_image, hq_image, data_range=hq_image.max()) if metrics is None: return {'psnr': [psnr],",
"data = np.array([data[t] for t in tags]) if data.max() > val_max: val_max =",
"is None: pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0]) return checkpoint, pattern(checkpoint) def compute_metrics(lq_image, hq_image,",
"= -float('inf') val_min = float('inf') for f in tqdm(data_files): data = sio.loadmat(f) data",
"import os.path as path import scipy.io as sio import numpy as np from",
"f in os.listdir(dir_path) if (True if predicate is None else predicate(f))] def get_last_checkpoint(checkpoint_dir,",
"import tqdm from skimage.measure import compare_ssim, compare_psnr def read_dir(dir_path, predicate=None, name_only=False): if predicate",
"as path import scipy.io as sio import numpy as np from tqdm import",
"'dir': lambda x: path.isdir(path.join(dir_path, x)), 'file':lambda x: path.isfile(path.join(dir_path, x)) }[predicate] return [f if",
"x.endswith('pt') checkpoints = read_dir(checkpoint_dir, predicate) if len(checkpoints) == 0: return None checkpoints =",
"1000 + 0.0192 return image def get_aapm_minmax(data_dir, splits=('test', 'train', 'val'), tags=('dense_view', 'sparse_view')): data_files",
"os.listdir(split_dir): study_dir = path.join(split_dir, d) for f in os.listdir(study_dir): data_file = path.join(study_dir, f)",
"s in splits: split_dir = path.join(data_dir, s) for d in os.listdir(split_dir): study_dir =",
"np.array([data[t] for t in tags]) if data.max() > val_max: val_max = data.max() if",
"val_min = float('inf') for f in tqdm(data_files): data = sio.loadmat(f) data = np.array([data[t]",
"predicate = { 'dir': lambda x: path.isdir(path.join(dir_path, x)), 'file':lambda x: path.isfile(path.join(dir_path, x)) }[predicate]",
"metrics['psnr'].append(psnr) metrics['ssim'].append(ssim) return metrics def convert_coefficient2hu(image): image = (image - 0.0192) / 0.0192",
"import numpy as np from tqdm import tqdm from skimage.measure import compare_ssim, compare_psnr",
"'compute_metrics', 'get_aapm_minmax', 'convert_coefficient2hu', 'convert_hu2coefficient'] import os import os.path as path import scipy.io as",
"- 0.0192) / 0.0192 * 1000 return image def convert_hu2coefficient(image): image = image",
"pattern is None: pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0]) return checkpoint, pattern(checkpoint) def compute_metrics(lq_image,",
"splits: split_dir = path.join(data_dir, s) for d in os.listdir(split_dir): study_dir = path.join(split_dir, d)",
"data_file = path.join(study_dir, f) if f.endswith('.mat'): data_files.append(data_file) val_max = -float('inf') val_min = float('inf')",
"None: predicate = lambda x: x.endswith('pth') or x.endswith('pt') checkpoints = read_dir(checkpoint_dir, predicate) if",
"image = (image - 0.0192) / 0.0192 * 1000 return image def convert_hu2coefficient(image):",
"'val'), tags=('dense_view', 'sparse_view')): data_files = [] for s in splits: split_dir = path.join(data_dir,",
"else: metrics['psnr'].append(psnr) metrics['ssim'].append(ssim) return metrics def convert_coefficient2hu(image): image = (image - 0.0192) /",
"data_files.append(data_file) val_max = -float('inf') val_min = float('inf') for f in tqdm(data_files): data =",
"in splits: split_dir = path.join(data_dir, s) for d in os.listdir(split_dir): study_dir = path.join(split_dir,",
"image def get_aapm_minmax(data_dir, splits=('test', 'train', 'val'), tags=('dense_view', 'sparse_view')): data_files = [] for s",
"return {'psnr': [psnr], 'ssim': [ssim]} else: metrics['psnr'].append(psnr) metrics['ssim'].append(ssim) return metrics def convert_coefficient2hu(image): image",
"in {'dir', 'file'}: predicate = { 'dir': lambda x: path.isdir(path.join(dir_path, x)), 'file':lambda x:",
"ssim = compare_ssim(lq_image, hq_image, data_range=hq_image.max()) if metrics is None: return {'psnr': [psnr], 'ssim':",
"= read_dir(checkpoint_dir, predicate) if len(checkpoints) == 0: return None checkpoints = sorted(checkpoints, key=lambda",
"x: x.endswith('pth') or x.endswith('pt') checkpoints = read_dir(checkpoint_dir, predicate) if len(checkpoints) == 0: return",
"data_range=hq_image.max()) if metrics is None: return {'psnr': [psnr], 'ssim': [ssim]} else: metrics['psnr'].append(psnr) metrics['ssim'].append(ssim)",
"val_max: val_max = data.max() if data.min() < val_min: val_min = data.min() return val_min,",
"return image def convert_hu2coefficient(image): image = image * 0.0192 / 1000 + 0.0192",
"compare_ssim(lq_image, hq_image, data_range=hq_image.max()) if metrics is None: return {'psnr': [psnr], 'ssim': [ssim]} else:",
"val_max = data.max() if data.min() < val_min: val_min = data.min() return val_min, val_max",
"import os import os.path as path import scipy.io as sio import numpy as",
"x.endswith('pth') or x.endswith('pt') checkpoints = read_dir(checkpoint_dir, predicate) if len(checkpoints) == 0: return None",
"pattern=None): if predicate is None: predicate = lambda x: x.endswith('pth') or x.endswith('pt') checkpoints",
"0: return None checkpoints = sorted(checkpoints, key=lambda x: path.getmtime(x)) checkpoint = checkpoints[-1] if",
"metrics is None: return {'psnr': [psnr], 'ssim': [ssim]} else: metrics['psnr'].append(psnr) metrics['ssim'].append(ssim) return metrics",
"for s in splits: split_dir = path.join(data_dir, s) for d in os.listdir(split_dir): study_dir",
"path.join(data_dir, s) for d in os.listdir(split_dir): study_dir = path.join(split_dir, d) for f in",
"np from tqdm import tqdm from skimage.measure import compare_ssim, compare_psnr def read_dir(dir_path, predicate=None,",
"if predicate in {'dir', 'file'}: predicate = { 'dir': lambda x: path.isdir(path.join(dir_path, x)),",
"= path.join(study_dir, f) if f.endswith('.mat'): data_files.append(data_file) val_max = -float('inf') val_min = float('inf') for",
"'convert_coefficient2hu', 'convert_hu2coefficient'] import os import os.path as path import scipy.io as sio import",
"t in tags]) if data.max() > val_max: val_max = data.max() if data.min() <",
"return metrics def convert_coefficient2hu(image): image = (image - 0.0192) / 0.0192 * 1000",
"(True if predicate is None else predicate(f))] def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None): if predicate",
"pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0]) return checkpoint, pattern(checkpoint) def compute_metrics(lq_image, hq_image, metrics=None): psnr",
"if metrics is None: return {'psnr': [psnr], 'ssim': [ssim]} else: metrics['psnr'].append(psnr) metrics['ssim'].append(ssim) return",
"data.max() > val_max: val_max = data.max() if data.min() < val_min: val_min = data.min()",
"def read_dir(dir_path, predicate=None, name_only=False): if predicate in {'dir', 'file'}: predicate = { 'dir':",
"get_aapm_minmax(data_dir, splits=('test', 'train', 'val'), tags=('dense_view', 'sparse_view')): data_files = [] for s in splits:",
"compare_ssim, compare_psnr def read_dir(dir_path, predicate=None, name_only=False): if predicate in {'dir', 'file'}: predicate =",
"scipy.io as sio import numpy as np from tqdm import tqdm from skimage.measure",
"if name_only else path.join(dir_path, f) for f in os.listdir(dir_path) if (True if predicate",
"is None: return {'psnr': [psnr], 'ssim': [ssim]} else: metrics['psnr'].append(psnr) metrics['ssim'].append(ssim) return metrics def",
"/ 1000 + 0.0192 return image def get_aapm_minmax(data_dir, splits=('test', 'train', 'val'), tags=('dense_view', 'sparse_view')):",
"metrics['ssim'].append(ssim) return metrics def convert_coefficient2hu(image): image = (image - 0.0192) / 0.0192 *",
"x)) }[predicate] return [f if name_only else path.join(dir_path, f) for f in os.listdir(dir_path)",
"'convert_hu2coefficient'] import os import os.path as path import scipy.io as sio import numpy",
"if (True if predicate is None else predicate(f))] def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None): if",
"-float('inf') val_min = float('inf') for f in tqdm(data_files): data = sio.loadmat(f) data =",
"'sparse_view')): data_files = [] for s in splits: split_dir = path.join(data_dir, s) for",
"checkpoints = sorted(checkpoints, key=lambda x: path.getmtime(x)) checkpoint = checkpoints[-1] if pattern is None:",
"0.0192 return image def get_aapm_minmax(data_dir, splits=('test', 'train', 'val'), tags=('dense_view', 'sparse_view')): data_files = []",
"'file':lambda x: path.isfile(path.join(dir_path, x)) }[predicate] return [f if name_only else path.join(dir_path, f) for",
"is None else predicate(f))] def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None): if predicate is None: predicate",
"split_dir = path.join(data_dir, s) for d in os.listdir(split_dir): study_dir = path.join(split_dir, d) for",
"__all__ = ['read_dir', 'get_last_checkpoint', 'compute_metrics', 'get_aapm_minmax', 'convert_coefficient2hu', 'convert_hu2coefficient'] import os import os.path as",
"return None checkpoints = sorted(checkpoints, key=lambda x: path.getmtime(x)) checkpoint = checkpoints[-1] if pattern",
"= (image - 0.0192) / 0.0192 * 1000 return image def convert_hu2coefficient(image): image",
"path.join(dir_path, f) for f in os.listdir(dir_path) if (True if predicate is None else",
"[ssim]} else: metrics['psnr'].append(psnr) metrics['ssim'].append(ssim) return metrics def convert_coefficient2hu(image): image = (image - 0.0192)",
"predicate in {'dir', 'file'}: predicate = { 'dir': lambda x: path.isdir(path.join(dir_path, x)), 'file':lambda",
"== 0: return None checkpoints = sorted(checkpoints, key=lambda x: path.getmtime(x)) checkpoint = checkpoints[-1]",
"0.0192) / 0.0192 * 1000 return image def convert_hu2coefficient(image): image = image *",
"if predicate is None else predicate(f))] def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None): if predicate is",
"[] for s in splits: split_dir = path.join(data_dir, s) for d in os.listdir(split_dir):",
"= ['read_dir', 'get_last_checkpoint', 'compute_metrics', 'get_aapm_minmax', 'convert_coefficient2hu', 'convert_hu2coefficient'] import os import os.path as path",
"data = sio.loadmat(f) data = np.array([data[t] for t in tags]) if data.max() >",
"tqdm from skimage.measure import compare_ssim, compare_psnr def read_dir(dir_path, predicate=None, name_only=False): if predicate in",
"from skimage.measure import compare_ssim, compare_psnr def read_dir(dir_path, predicate=None, name_only=False): if predicate in {'dir',",
"{'dir', 'file'}: predicate = { 'dir': lambda x: path.isdir(path.join(dir_path, x)), 'file':lambda x: path.isfile(path.join(dir_path,",
"name_only else path.join(dir_path, f) for f in os.listdir(dir_path) if (True if predicate is",
"in os.listdir(dir_path) if (True if predicate is None else predicate(f))] def get_last_checkpoint(checkpoint_dir, predicate=None,",
"else predicate(f))] def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None): if predicate is None: predicate = lambda",
"int(path.basename(x).split('_')[-1].split('.')[0]) return checkpoint, pattern(checkpoint) def compute_metrics(lq_image, hq_image, metrics=None): psnr = compare_psnr(lq_image, hq_image, hq_image.max())",
"metrics=None): psnr = compare_psnr(lq_image, hq_image, hq_image.max()) ssim = compare_ssim(lq_image, hq_image, data_range=hq_image.max()) if metrics",
"data_files = [] for s in splits: split_dir = path.join(data_dir, s) for d",
"def convert_hu2coefficient(image): image = image * 0.0192 / 1000 + 0.0192 return image",
"lambda x: int(path.basename(x).split('_')[-1].split('.')[0]) return checkpoint, pattern(checkpoint) def compute_metrics(lq_image, hq_image, metrics=None): psnr = compare_psnr(lq_image,",
"name_only=False): if predicate in {'dir', 'file'}: predicate = { 'dir': lambda x: path.isdir(path.join(dir_path,",
"tqdm(data_files): data = sio.loadmat(f) data = np.array([data[t] for t in tags]) if data.max()",
"is None: predicate = lambda x: x.endswith('pth') or x.endswith('pt') checkpoints = read_dir(checkpoint_dir, predicate)",
"else path.join(dir_path, f) for f in os.listdir(dir_path) if (True if predicate is None",
"numpy as np from tqdm import tqdm from skimage.measure import compare_ssim, compare_psnr def",
"return [f if name_only else path.join(dir_path, f) for f in os.listdir(dir_path) if (True",
"= sio.loadmat(f) data = np.array([data[t] for t in tags]) if data.max() > val_max:",
"pattern(checkpoint) def compute_metrics(lq_image, hq_image, metrics=None): psnr = compare_psnr(lq_image, hq_image, hq_image.max()) ssim = compare_ssim(lq_image,",
"image def convert_hu2coefficient(image): image = image * 0.0192 / 1000 + 0.0192 return",
"image * 0.0192 / 1000 + 0.0192 return image def get_aapm_minmax(data_dir, splits=('test', 'train',",
"predicate is None: predicate = lambda x: x.endswith('pth') or x.endswith('pt') checkpoints = read_dir(checkpoint_dir,",
"return checkpoint, pattern(checkpoint) def compute_metrics(lq_image, hq_image, metrics=None): psnr = compare_psnr(lq_image, hq_image, hq_image.max()) ssim",
"compare_psnr(lq_image, hq_image, hq_image.max()) ssim = compare_ssim(lq_image, hq_image, data_range=hq_image.max()) if metrics is None: return",
"{ 'dir': lambda x: path.isdir(path.join(dir_path, x)), 'file':lambda x: path.isfile(path.join(dir_path, x)) }[predicate] return [f",
"predicate = lambda x: x.endswith('pth') or x.endswith('pt') checkpoints = read_dir(checkpoint_dir, predicate) if len(checkpoints)",
"if f.endswith('.mat'): data_files.append(data_file) val_max = -float('inf') val_min = float('inf') for f in tqdm(data_files):",
"get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None): if predicate is None: predicate = lambda x: x.endswith('pth') or",
"return image def get_aapm_minmax(data_dir, splits=('test', 'train', 'val'), tags=('dense_view', 'sparse_view')): data_files = [] for",
"x: path.isfile(path.join(dir_path, x)) }[predicate] return [f if name_only else path.join(dir_path, f) for f",
"tags=('dense_view', 'sparse_view')): data_files = [] for s in splits: split_dir = path.join(data_dir, s)",
"skimage.measure import compare_ssim, compare_psnr def read_dir(dir_path, predicate=None, name_only=False): if predicate in {'dir', 'file'}:",
"read_dir(dir_path, predicate=None, name_only=False): if predicate in {'dir', 'file'}: predicate = { 'dir': lambda",
"= sorted(checkpoints, key=lambda x: path.getmtime(x)) checkpoint = checkpoints[-1] if pattern is None: pattern",
"checkpoint = checkpoints[-1] if pattern is None: pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0]) return",
"= lambda x: int(path.basename(x).split('_')[-1].split('.')[0]) return checkpoint, pattern(checkpoint) def compute_metrics(lq_image, hq_image, metrics=None): psnr =",
"if data.max() > val_max: val_max = data.max() if data.min() < val_min: val_min =",
"hq_image, hq_image.max()) ssim = compare_ssim(lq_image, hq_image, data_range=hq_image.max()) if metrics is None: return {'psnr':",
"0.0192 / 1000 + 0.0192 return image def get_aapm_minmax(data_dir, splits=('test', 'train', 'val'), tags=('dense_view',",
"sorted(checkpoints, key=lambda x: path.getmtime(x)) checkpoint = checkpoints[-1] if pattern is None: pattern =",
"0.0192 * 1000 return image def convert_hu2coefficient(image): image = image * 0.0192 /",
"if len(checkpoints) == 0: return None checkpoints = sorted(checkpoints, key=lambda x: path.getmtime(x)) checkpoint",
"<filename>utils/misc.py __all__ = ['read_dir', 'get_last_checkpoint', 'compute_metrics', 'get_aapm_minmax', 'convert_coefficient2hu', 'convert_hu2coefficient'] import os import os.path",
"x: path.getmtime(x)) checkpoint = checkpoints[-1] if pattern is None: pattern = lambda x:",
"+ 0.0192 return image def get_aapm_minmax(data_dir, splits=('test', 'train', 'val'), tags=('dense_view', 'sparse_view')): data_files =",
"= [] for s in splits: split_dir = path.join(data_dir, s) for d in",
"sio.loadmat(f) data = np.array([data[t] for t in tags]) if data.max() > val_max: val_max",
"f in os.listdir(study_dir): data_file = path.join(study_dir, f) if f.endswith('.mat'): data_files.append(data_file) val_max = -float('inf')",
"predicate=None, name_only=False): if predicate in {'dir', 'file'}: predicate = { 'dir': lambda x:",
"import scipy.io as sio import numpy as np from tqdm import tqdm from",
"for f in os.listdir(dir_path) if (True if predicate is None else predicate(f))] def",
"= lambda x: x.endswith('pth') or x.endswith('pt') checkpoints = read_dir(checkpoint_dir, predicate) if len(checkpoints) ==",
"path import scipy.io as sio import numpy as np from tqdm import tqdm",
"None: pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0]) return checkpoint, pattern(checkpoint) def compute_metrics(lq_image, hq_image, metrics=None):",
"def convert_coefficient2hu(image): image = (image - 0.0192) / 0.0192 * 1000 return image",
"path.join(split_dir, d) for f in os.listdir(study_dir): data_file = path.join(study_dir, f) if f.endswith('.mat'): data_files.append(data_file)",
"sio import numpy as np from tqdm import tqdm from skimage.measure import compare_ssim,",
"x: int(path.basename(x).split('_')[-1].split('.')[0]) return checkpoint, pattern(checkpoint) def compute_metrics(lq_image, hq_image, metrics=None): psnr = compare_psnr(lq_image, hq_image,",
"predicate is None else predicate(f))] def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None): if predicate is None:",
"or x.endswith('pt') checkpoints = read_dir(checkpoint_dir, predicate) if len(checkpoints) == 0: return None checkpoints",
"(image - 0.0192) / 0.0192 * 1000 return image def convert_hu2coefficient(image): image =",
"[f if name_only else path.join(dir_path, f) for f in os.listdir(dir_path) if (True if",
"def get_aapm_minmax(data_dir, splits=('test', 'train', 'val'), tags=('dense_view', 'sparse_view')): data_files = [] for s in",
"[psnr], 'ssim': [ssim]} else: metrics['psnr'].append(psnr) metrics['ssim'].append(ssim) return metrics def convert_coefficient2hu(image): image = (image",
"= checkpoints[-1] if pattern is None: pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0]) return checkpoint,",
"in os.listdir(split_dir): study_dir = path.join(split_dir, d) for f in os.listdir(study_dir): data_file = path.join(study_dir,",
"'train', 'val'), tags=('dense_view', 'sparse_view')): data_files = [] for s in splits: split_dir =",
"for t in tags]) if data.max() > val_max: val_max = data.max() if data.min()",
"hq_image, data_range=hq_image.max()) if metrics is None: return {'psnr': [psnr], 'ssim': [ssim]} else: metrics['psnr'].append(psnr)",
"tqdm import tqdm from skimage.measure import compare_ssim, compare_psnr def read_dir(dir_path, predicate=None, name_only=False): if",
"import compare_ssim, compare_psnr def read_dir(dir_path, predicate=None, name_only=False): if predicate in {'dir', 'file'}: predicate",
"lambda x: path.isdir(path.join(dir_path, x)), 'file':lambda x: path.isfile(path.join(dir_path, x)) }[predicate] return [f if name_only",
"compare_psnr def read_dir(dir_path, predicate=None, name_only=False): if predicate in {'dir', 'file'}: predicate = {",
"def compute_metrics(lq_image, hq_image, metrics=None): psnr = compare_psnr(lq_image, hq_image, hq_image.max()) ssim = compare_ssim(lq_image, hq_image,",
"* 1000 return image def convert_hu2coefficient(image): image = image * 0.0192 / 1000",
"['read_dir', 'get_last_checkpoint', 'compute_metrics', 'get_aapm_minmax', 'convert_coefficient2hu', 'convert_hu2coefficient'] import os import os.path as path import",
"tags]) if data.max() > val_max: val_max = data.max() if data.min() < val_min: val_min",
"'ssim': [ssim]} else: metrics['psnr'].append(psnr) metrics['ssim'].append(ssim) return metrics def convert_coefficient2hu(image): image = (image -",
"f in tqdm(data_files): data = sio.loadmat(f) data = np.array([data[t] for t in tags])",
"checkpoints[-1] if pattern is None: pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0]) return checkpoint, pattern(checkpoint)",
"= np.array([data[t] for t in tags]) if data.max() > val_max: val_max = data.max()",
"f.endswith('.mat'): data_files.append(data_file) val_max = -float('inf') val_min = float('inf') for f in tqdm(data_files): data",
"'get_last_checkpoint', 'compute_metrics', 'get_aapm_minmax', 'convert_coefficient2hu', 'convert_hu2coefficient'] import os import os.path as path import scipy.io",
"from tqdm import tqdm from skimage.measure import compare_ssim, compare_psnr def read_dir(dir_path, predicate=None, name_only=False):",
"= float('inf') for f in tqdm(data_files): data = sio.loadmat(f) data = np.array([data[t] for",
"* 0.0192 / 1000 + 0.0192 return image def get_aapm_minmax(data_dir, splits=('test', 'train', 'val'),",
"lambda x: x.endswith('pth') or x.endswith('pt') checkpoints = read_dir(checkpoint_dir, predicate) if len(checkpoints) == 0:",
"if predicate is None: predicate = lambda x: x.endswith('pth') or x.endswith('pt') checkpoints ="
] |
[
"work work\") cmd = \"vlog \" # define cmd += \" +define+SIMULATION \"",
"(sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = self.sv_top_name(top_level_file_path) cmd =",
"print(cmd) return [cmd] def create_cmds_run(self, top_level_file_path:str): cmd = \"vvp a.out \" return [cmd]",
"\" %s \" % sv_file cmd += \" %s \" % top_level_file_path cmds.append(cmd)",
"= deps.FpgaLib(self.current_folder) sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmd = \"xvlog -sv \" # define cmd",
"cmd = \"vlog \" # define cmd += \" +define+SIMULATION \" # includes",
"define cmd += \" -d SIMULATION\" # includes for include in build_lib.get_full_include_dependencies(): cmd",
"cmds.append(\"vmap work work\") cmd = \"vlog \" # define cmd += \" +define+SIMULATION",
"that we are logical, and the file name is the module name. basename",
"= \"vlog \" # define cmd += \" +define+SIMULATION \" # includes for",
"are assuming that we are logical, and the file name is the module",
"top_level_file_path:str): cmd = \"vvp a.out \" return [cmd] class CompilerModelsim(SimuCompiler): def __init__(self, build_dir=os.getcwd()):",
"top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) cmds.append(\"which vlib\") cmds.append(\"vlib work\") cmds.append(\"vmap work",
"\" % sv_file cmd += \" %s \" % top_level_file_path print(cmd) return [cmd]",
"sv_file in build_lib.get_full_file_dependencies(): cmd += \" %s \" % sv_file cmd += \"",
"\" %s \" % top_level_file_path cmds.append(cmd) return cmds def create_cmds_run(self, top_level_file_path:str): # crazy",
"logical, and the file name is the module name. basename = os.path.basename(file_path) return",
"module name. basename = os.path.basename(file_path) return os.path.splitext(basename)[0] class CompilerIverilog(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerIverilog,",
"\" %s \" % top_level_file_path print(cmd) return [cmd] def create_cmds_run(self, top_level_file_path:str): cmd =",
"sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmd = \"xvlog -sv \" # define cmd += \"",
"for sv_file in build_lib.get_full_file_dependencies(): cmd += \" %s \" % sv_file cmd +=",
"20ns current_time quit\" > run.tcl \"\"\" cmds.append(cmd) cmd = \"xsim %s_sim -tclbatch run.tcl\"",
"= deps.FpgaLib(self.current_folder) cmds.append(\"which vlib\") cmds.append(\"vlib work\") cmds.append(\"vmap work work\") cmd = \"vlog \"",
"<reponame>FAIG2014/fpga-projects import os import sys import pyfgag.dependency_builder as deps import subprocess import pathlib",
"= self.sv_top_name(top_level_file_path) cmd = \"obj_dir/V%s\" % sv_top_name return [cmd] class CompilerVivado(SimuCompiler): def __init__(self,",
"want to remove #cmd += \" +nowarn3116\" cmd = \"vsim +nowarn3116 -t ps",
"should implement this!\") def compile(self, top_level_file_path:str): cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path) for cmd in cmds:",
"+= \" +define+SIMULATION \" # includes for include in build_lib.get_full_include_dependencies(): cmd += \"",
"[] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"verilator -Wall --cc %s.sv",
"cwd=self.build_dir) if cmd_ret: print(\"ERROR: compiles went wrong\") raise Exception() def run_simu(self, top_level_file_path:str): cmds",
"include # files for sv_file in build_lib.get_full_file_dependencies(): cmd += \" %s \" %",
"sv_top_name # define cmd += \" +define+SIMULATION --dump-defines \" # includes for include",
"\" +nowarn3116\" cmd = \"vsim +nowarn3116 -t ps -c -do \\\"log -r /*",
"def sv_top_name(top_level_file_path:str): main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path) return main_name.split(\"_main\")[0] #return CompilerVerilator.get_module_name_from_path(top_level_file_path) def create_cmds_compile(self, top_level_file_path:str): cmds",
"-r /* ; run 20 ms; quit -f \\\" %s \" % self.get_module_name_from_path(top_level_file_path)",
"work\") cmd = \"vlog \" # define cmd += \" +define+SIMULATION \" #",
"% sv_file cmd += \" %s \" % top_level_file_path print(cmd) return [cmd] def",
"\" return [cmd] class CompilerModelsim(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerModelsim, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str):",
"print(\"ERROR: compiles went wrong\") raise Exception() def run_simu(self, top_level_file_path:str): cmds = self.create_cmds_run(top_level_file_path=top_level_file_path) for",
"+= \" %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"xelab -debug typical %s -s %s_sim\"",
"\"verilator -Wall --cc %s.sv -GTEST=2\" % sv_top_name # define cmd += \" +define+SIMULATION",
"include in build_lib.get_full_include_dependencies(): cmd += \" -i %s \" % include # files",
"for include in build_lib.get_full_include_dependencies(): cmd += \" -i %s \" % include #",
"self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): build_lib = deps.FpgaLib(self.current_folder) cmd = \"iverilog -g2012 \" #",
"the file name is the module name. basename = os.path.basename(file_path) return os.path.splitext(basename)[0] class",
"for include in build_lib.get_full_include_dependencies(): cmd += \" -I %s \" % include #",
"deps import subprocess import pathlib class SimuCompiler(object): def __init__(self, build_dir=os.getcwd()): self.current_folder = os.getcwd()",
"= \"vsim +nowarn3116 -t ps -c -do \\\"log -r /* ; run 20",
"[] cmd = \"\"\"echo \"run 20ns current_time quit\" > run.tcl \"\"\" cmds.append(cmd) cmd",
"__init__(self, build_dir=os.getcwd()): super(CompilerVerilator, self).__init__(build_dir=build_dir) @staticmethod def sv_top_name(top_level_file_path:str): main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path) return main_name.split(\"_main\")[0] #return",
"-GTEST=2\" % sv_top_name # define cmd += \" +define+SIMULATION --dump-defines \" # includes",
"top_level_file_path:str): cmds = self.create_cmds_run(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" % cmd) cmd_ret",
"__init__(self, build_dir=os.getcwd()): super(CompilerModelsim, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder)",
"in build_lib.get_full_file_dependencies(): cmd += \" %s \" % sv_file cmd += \" --exe",
"self).__init__(build_dir=build_dir) @staticmethod def sv_top_name(top_level_file_path:str): main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path) return main_name.split(\"_main\")[0] #return CompilerVerilator.get_module_name_from_path(top_level_file_path) def create_cmds_compile(self,",
"cmds.append(\"which vlib\") cmds.append(\"vlib work\") cmds.append(\"vmap work work\") cmd = \"vlog \" # define",
"cmd += \" %s \" % top_level_file_path cmds.append(cmd) return cmds def create_cmds_run(self, top_level_file_path:str):",
"main_name.split(\"_main\")[0] #return CompilerVerilator.get_module_name_from_path(top_level_file_path) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name",
"= \"obj_dir/V%s\" % sv_top_name return [cmd] class CompilerVivado(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVivado, self).__init__(build_dir=build_dir)",
"self.create_cmds_run(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" % cmd) cmd_ret = subprocess.call(cmd, shell=True,",
"+define+SIMULATION \" # includes for include in build_lib.get_full_include_dependencies(): cmd += \" +incdir+%s \"",
"%s \" % self.get_module_name_from_path(top_level_file_path) return [cmd] class CompilerVerilator(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVerilator, self).__init__(build_dir=build_dir)",
"% sv_top_name # define cmd += \" +define+SIMULATION --dump-defines \" # includes for",
"+incdir+%s \" % include # files for sv_file in build_lib.get_full_file_dependencies(): cmd += \"",
"create_cmds_run(self, top_level_file_path:str): sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"obj_dir/V%s\" % sv_top_name return [cmd] class",
"= CompilerVivado.get_module_name_from_path(top_level_file_path) cmd = \"xvlog -sv \" # define cmd += \" -d",
"basename = os.path.basename(file_path) return os.path.splitext(basename)[0] class CompilerIverilog(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerIverilog, self).__init__(build_dir=build_dir) def",
"= deps.FpgaLib(self.current_folder) cmd = \"iverilog -g2012 \" # define cmd += \" -D",
"def create_cmds_run(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def compile(self, top_level_file_path:str): cmds =",
"super(CompilerVerilator, self).__init__(build_dir=build_dir) @staticmethod def sv_top_name(top_level_file_path:str): main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path) return main_name.split(\"_main\")[0] #return CompilerVerilator.get_module_name_from_path(top_level_file_path) def",
"% include # files for sv_file in build_lib.get_full_file_dependencies(): cmd += \" %s \"",
"= \"xvlog -sv \" # define cmd += \" -d SIMULATION\" # includes",
"% (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmds",
"cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmd = \"xvlog -sv",
"Exception() @staticmethod def get_module_name_from_path(file_path:str): # we are assuming that we are logical, and",
"cmd += \" -D SIMULATION \" # includes for include in build_lib.get_full_include_dependencies(): cmd",
"self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path)",
"%s.sv -GTEST=2\" % sv_top_name # define cmd += \" +define+SIMULATION --dump-defines \" #",
"-f V%s.mk V%s\" % (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name",
"self.build_dir = build_dir pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True) def create_cmds_compile(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\")",
"as deps import subprocess import pathlib class SimuCompiler(object): def __init__(self, build_dir=os.getcwd()): self.current_folder =",
"\"xvlog -sv \" # define cmd += \" -d SIMULATION\" # includes for",
"name is the module name. basename = os.path.basename(file_path) return os.path.splitext(basename)[0] class CompilerIverilog(SimuCompiler): def",
"pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True) def create_cmds_compile(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def create_cmds_run(self, top_level_file_path:str):",
"file name is the module name. basename = os.path.basename(file_path) return os.path.splitext(basename)[0] class CompilerIverilog(SimuCompiler):",
"= \"iverilog -g2012 \" # define cmd += \" -D SIMULATION \" #",
"raise Exception() @staticmethod def get_module_name_from_path(file_path:str): # we are assuming that we are logical,",
"% self.get_module_name_from_path(top_level_file_path) return [cmd] class CompilerVerilator(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVerilator, self).__init__(build_dir=build_dir) @staticmethod def",
"\" +incdir+%s \" % include # files for sv_file in build_lib.get_full_file_dependencies(): cmd +=",
"for include in build_lib.get_full_include_dependencies(): cmd += \" +incdir+%s \" % include # files",
"-C obj_dir -f V%s.mk V%s\" % (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self,",
"\"\"\" cmds.append(cmd) cmd = \"xsim %s_sim -tclbatch run.tcl\" % (sv_top_name) cmds.append(cmd) return cmds",
"sys import pyfgag.dependency_builder as deps import subprocess import pathlib class SimuCompiler(object): def __init__(self,",
"sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmds = []",
"CompilerModelsim(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerModelsim, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib",
"CompilerVerilator.get_module_name_from_path(top_level_file_path) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = self.sv_top_name(top_level_file_path)",
"def create_cmds_compile(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def create_cmds_run(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses",
"20 ms; quit -f \\\" %s \" % self.get_module_name_from_path(top_level_file_path) return [cmd] class CompilerVerilator(SimuCompiler):",
"if cmd_ret: print(\"ERROR: compiles went wrong\") raise Exception() def run_simu(self, top_level_file_path:str): cmds =",
"went wrong\") raise Exception() def run_simu(self, top_level_file_path:str): cmds = self.create_cmds_run(top_level_file_path=top_level_file_path) for cmd in",
"return [cmd] class CompilerVerilator(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVerilator, self).__init__(build_dir=build_dir) @staticmethod def sv_top_name(top_level_file_path:str): main_name",
"sv_file cmd += \" %s \" % top_level_file_path cmds.append(cmd) return cmds def create_cmds_run(self,",
"ps -c -do \\\"log -r /* ; run 20 ms; quit -f \\\"",
"__init__(self, build_dir=os.getcwd()): super(CompilerIverilog, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): build_lib = deps.FpgaLib(self.current_folder) cmd = \"iverilog",
"in build_lib.get_full_include_dependencies(): cmd += \" -i %s \" % include # files for",
"%s \" % sv_file cmd += \" %s \" % top_level_file_path print(cmd) return",
"build_lib = deps.FpgaLib(self.current_folder) cmd = \"iverilog -g2012 \" # define cmd += \"",
"--cc %s.sv -GTEST=2\" % sv_top_name # define cmd += \" +define+SIMULATION --dump-defines \"",
"sv_file cmd += \" --exe %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"make -j -C",
"self.get_module_name_from_path(top_level_file_path) return [cmd] class CompilerVerilator(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVerilator, self).__init__(build_dir=build_dir) @staticmethod def sv_top_name(top_level_file_path:str):",
"cmd += \" -i %s \" % include # files for sv_file in",
"def create_cmds_run(self, top_level_file_path:str): # crazy warning we want to remove #cmd += \"",
"is the module name. basename = os.path.basename(file_path) return os.path.splitext(basename)[0] class CompilerIverilog(SimuCompiler): def __init__(self,",
"cmd in cmds: print(\"RUNING %s\" % cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if",
"create_cmds_compile(self, top_level_file_path:str): build_lib = deps.FpgaLib(self.current_folder) cmd = \"iverilog -g2012 \" # define cmd",
"import pyfgag.dependency_builder as deps import subprocess import pathlib class SimuCompiler(object): def __init__(self, build_dir=os.getcwd()):",
"%s -s %s_sim\" % (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name",
"-c -do \\\"log -r /* ; run 20 ms; quit -f \\\" %s",
"def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) cmds.append(\"which vlib\") cmds.append(\"vlib work\")",
"CompilerVerilator.get_module_name_from_path(top_level_file_path) return main_name.split(\"_main\")[0] #return CompilerVerilator.get_module_name_from_path(top_level_file_path) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib =",
"cmds: print(\"RUNING %s\" % cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: raise",
"NotImplementedError(\"Subclasses should implement this!\") def create_cmds_run(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def",
"build_dir=os.getcwd()): self.current_folder = os.getcwd() self.build_dir = build_dir pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True) def create_cmds_compile(self, top_level_file_path:str): raise",
"subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: print(\"ERROR: compiles went wrong\") raise Exception() def run_simu(self,",
"sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"obj_dir/V%s\"",
"create_cmds_compile(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def create_cmds_run(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should",
"def compile(self, top_level_file_path:str): cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" %",
"\" # define cmd += \" +define+SIMULATION \" # includes for include in",
"def create_cmds_run(self, top_level_file_path:str): sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmds = [] cmd = \"\"\"echo \"run",
"= [] build_lib = deps.FpgaLib(self.current_folder) cmds.append(\"which vlib\") cmds.append(\"vlib work\") cmds.append(\"vmap work work\") cmd",
"top_level_file_path:str): # crazy warning we want to remove #cmd += \" +nowarn3116\" cmd",
"top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmd = \"xvlog",
"= os.path.basename(file_path) return os.path.splitext(basename)[0] class CompilerIverilog(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerIverilog, self).__init__(build_dir=build_dir) def create_cmds_compile(self,",
"cmds.append(cmd) cmds.append(\"xelab -debug typical %s -s %s_sim\" % (sv_top_name, sv_top_name) ) return cmds",
"# includes for include in build_lib.get_full_include_dependencies(): cmd += \" -I %s \" %",
"@staticmethod def get_module_name_from_path(file_path:str): # we are assuming that we are logical, and the",
"cmd += \" --exe %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"make -j -C obj_dir",
"cmds.append(\"vlib work\") cmds.append(\"vmap work work\") cmd = \"vlog \" # define cmd +=",
"sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmds = [] cmd = \"\"\"echo \"run 20ns current_time quit\"",
"%s \" % top_level_file_path print(cmd) return [cmd] def create_cmds_run(self, top_level_file_path:str): cmd = \"vvp",
"% sv_top_name return [cmd] class CompilerVivado(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVivado, self).__init__(build_dir=build_dir) def create_cmds_compile(self,",
"shell=True, cwd=self.build_dir) if cmd_ret: raise Exception() @staticmethod def get_module_name_from_path(file_path:str): # we are assuming",
"sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"verilator -Wall --cc %s.sv -GTEST=2\" % sv_top_name #",
"top_level_file_path cmds.append(cmd) return cmds def create_cmds_run(self, top_level_file_path:str): # crazy warning we want to",
"\" %s \" % sv_file cmd += \" %s \" % top_level_file_path print(cmd)",
"%s \" % sv_file cmd += \" --exe %s \" % top_level_file_path cmds.append(cmd)",
"def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmd",
"class SimuCompiler(object): def __init__(self, build_dir=os.getcwd()): self.current_folder = os.getcwd() self.build_dir = build_dir pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True)",
"%s \" % include # files for sv_file in build_lib.get_full_file_dependencies(): cmd += \"",
"@staticmethod def sv_top_name(top_level_file_path:str): main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path) return main_name.split(\"_main\")[0] #return CompilerVerilator.get_module_name_from_path(top_level_file_path) def create_cmds_compile(self, top_level_file_path:str):",
"return [cmd] class CompilerModelsim(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerModelsim, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds",
"# crazy warning we want to remove #cmd += \" +nowarn3116\" cmd =",
"vlib\") cmds.append(\"vlib work\") cmds.append(\"vmap work work\") cmd = \"vlog \" # define cmd",
"def __init__(self, build_dir=os.getcwd()): self.current_folder = os.getcwd() self.build_dir = build_dir pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True) def create_cmds_compile(self,",
"cmd = \"\"\"echo \"run 20ns current_time quit\" > run.tcl \"\"\" cmds.append(cmd) cmd =",
"run_simu(self, top_level_file_path:str): cmds = self.create_cmds_run(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" % cmd)",
"and the file name is the module name. basename = os.path.basename(file_path) return os.path.splitext(basename)[0]",
"cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: print(\"ERROR: compiles went wrong\") raise Exception()",
"= self.sv_top_name(top_level_file_path) cmd = \"verilator -Wall --cc %s.sv -GTEST=2\" % sv_top_name # define",
"create_cmds_run(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def compile(self, top_level_file_path:str): cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path)",
"compiles went wrong\") raise Exception() def run_simu(self, top_level_file_path:str): cmds = self.create_cmds_run(top_level_file_path=top_level_file_path) for cmd",
"in build_lib.get_full_include_dependencies(): cmd += \" -I %s \" % include # files for",
"name. basename = os.path.basename(file_path) return os.path.splitext(basename)[0] class CompilerIverilog(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerIverilog, self).__init__(build_dir=build_dir)",
"% top_level_file_path cmds.append(cmd) cmds.append(\"make -j -C obj_dir -f V%s.mk V%s\" % (sv_top_name, sv_top_name)",
"%s\" % cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: raise Exception() @staticmethod",
"cwd=self.build_dir) if cmd_ret: raise Exception() @staticmethod def get_module_name_from_path(file_path:str): # we are assuming that",
"+= \" -I %s \" % include # files for sv_file in build_lib.get_full_file_dependencies():",
"build_lib.get_full_file_dependencies(): cmd += \" %s \" % sv_file cmd += \" %s \"",
"-s %s_sim\" % (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name =",
"%s \" % top_level_file_path cmds.append(cmd) cmds.append(\"make -j -C obj_dir -f V%s.mk V%s\" %",
"/* ; run 20 ms; quit -f \\\" %s \" % self.get_module_name_from_path(top_level_file_path) return",
"--exe %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"make -j -C obj_dir -f V%s.mk V%s\"",
"build_dir pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True) def create_cmds_compile(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def create_cmds_run(self,",
"V%s.mk V%s\" % (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name =",
"def __init__(self, build_dir=os.getcwd()): super(CompilerVerilator, self).__init__(build_dir=build_dir) @staticmethod def sv_top_name(top_level_file_path:str): main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path) return main_name.split(\"_main\")[0]",
"wrong\") raise Exception() def run_simu(self, top_level_file_path:str): cmds = self.create_cmds_run(top_level_file_path=top_level_file_path) for cmd in cmds:",
"\" -d SIMULATION\" # includes for include in build_lib.get_full_include_dependencies(): cmd += \" -i",
"# includes for include in build_lib.get_full_include_dependencies(): cmd += \" +incdir+%s \" % include",
"V%s\" % (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = self.sv_top_name(top_level_file_path)",
"sv_top_name(top_level_file_path:str): main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path) return main_name.split(\"_main\")[0] #return CompilerVerilator.get_module_name_from_path(top_level_file_path) def create_cmds_compile(self, top_level_file_path:str): cmds =",
"os import sys import pyfgag.dependency_builder as deps import subprocess import pathlib class SimuCompiler(object):",
"define cmd += \" +define+SIMULATION \" # includes for include in build_lib.get_full_include_dependencies(): cmd",
"\" +define+SIMULATION \" # includes for include in build_lib.get_full_include_dependencies(): cmd += \" +incdir+%s",
"include in build_lib.get_full_include_dependencies(): cmd += \" +incdir+%s \" % include # files for",
"\"obj_dir/V%s\" % sv_top_name return [cmd] class CompilerVivado(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVivado, self).__init__(build_dir=build_dir) def",
"% sv_file cmd += \" --exe %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"make -j",
"class CompilerIverilog(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerIverilog, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): build_lib = deps.FpgaLib(self.current_folder)",
"in cmds: print(\"RUNING %s\" % cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret:",
"assuming that we are logical, and the file name is the module name.",
") return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmds = [] cmd",
"cmds = [] cmd = \"\"\"echo \"run 20ns current_time quit\" > run.tcl \"\"\"",
"includes for include in build_lib.get_full_include_dependencies(): cmd += \" -i %s \" % include",
"\\\"log -r /* ; run 20 ms; quit -f \\\" %s \" %",
"\" % sv_file cmd += \" %s \" % top_level_file_path cmds.append(cmd) return cmds",
"cmd = \"vvp a.out \" return [cmd] class CompilerModelsim(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerModelsim,",
"cmd = \"iverilog -g2012 \" # define cmd += \" -D SIMULATION \"",
"-Wall --cc %s.sv -GTEST=2\" % sv_top_name # define cmd += \" +define+SIMULATION --dump-defines",
"\" %s \" % sv_file cmd += \" --exe %s \" % top_level_file_path",
"import sys import pyfgag.dependency_builder as deps import subprocess import pathlib class SimuCompiler(object): def",
"% top_level_file_path print(cmd) return [cmd] def create_cmds_run(self, top_level_file_path:str): cmd = \"vvp a.out \"",
"\" -I %s \" % include # files for sv_file in build_lib.get_full_file_dependencies(): cmd",
"# includes for include in build_lib.get_full_include_dependencies(): cmd += \" -i %s \" %",
"super(CompilerVivado, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name =",
"cmd += \" +define+SIMULATION --dump-defines \" # includes for include in build_lib.get_full_include_dependencies(): cmd",
"deps.FpgaLib(self.current_folder) sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmd = \"xvlog -sv \" # define cmd +=",
"= [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"verilator -Wall --cc",
"def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = self.sv_top_name(top_level_file_path) cmd",
"Exception() def run_simu(self, top_level_file_path:str): cmds = self.create_cmds_run(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\"",
"CompilerVerilator(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVerilator, self).__init__(build_dir=build_dir) @staticmethod def sv_top_name(top_level_file_path:str): main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path) return",
"= self.create_cmds_compile(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" % cmd) cmd_ret = subprocess.call(cmd,",
"this!\") def create_cmds_run(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def compile(self, top_level_file_path:str): cmds",
"sv_file cmd += \" %s \" % top_level_file_path print(cmd) return [cmd] def create_cmds_run(self,",
"CompilerVivado.get_module_name_from_path(top_level_file_path) cmds = [] cmd = \"\"\"echo \"run 20ns current_time quit\" > run.tcl",
"(sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmds =",
"cmd = \"vsim +nowarn3116 -t ps -c -do \\\"log -r /* ; run",
"def get_module_name_from_path(file_path:str): # we are assuming that we are logical, and the file",
"build_dir=os.getcwd()): super(CompilerIverilog, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): build_lib = deps.FpgaLib(self.current_folder) cmd = \"iverilog -g2012",
"-I %s \" % include # files for sv_file in build_lib.get_full_file_dependencies(): cmd +=",
"__init__(self, build_dir=os.getcwd()): self.current_folder = os.getcwd() self.build_dir = build_dir pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True) def create_cmds_compile(self, top_level_file_path:str):",
"raise NotImplementedError(\"Subclasses should implement this!\") def create_cmds_run(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\")",
"+= \" %s \" % top_level_file_path cmds.append(cmd) return cmds def create_cmds_run(self, top_level_file_path:str): #",
"includes for include in build_lib.get_full_include_dependencies(): cmd += \" +incdir+%s \" % include #",
"% sv_file cmd += \" %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"xelab -debug typical",
"current_time quit\" > run.tcl \"\"\" cmds.append(cmd) cmd = \"xsim %s_sim -tclbatch run.tcl\" %",
"+= \" %s \" % sv_file cmd += \" --exe %s \" %",
"deps.FpgaLib(self.current_folder) cmds.append(\"which vlib\") cmds.append(\"vlib work\") cmds.append(\"vmap work work\") cmd = \"vlog \" #",
"\"iverilog -g2012 \" # define cmd += \" -D SIMULATION \" # includes",
"= \"vvp a.out \" return [cmd] class CompilerModelsim(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerModelsim, self).__init__(build_dir=build_dir)",
"build_lib.get_full_include_dependencies(): cmd += \" -i %s \" % include # files for sv_file",
"quit -f \\\" %s \" % self.get_module_name_from_path(top_level_file_path) return [cmd] class CompilerVerilator(SimuCompiler): def __init__(self,",
"work\") cmds.append(\"vmap work work\") cmd = \"vlog \" # define cmd += \"",
"cmd += \" +incdir+%s \" % include # files for sv_file in build_lib.get_full_file_dependencies():",
"+= \" +incdir+%s \" % include # files for sv_file in build_lib.get_full_file_dependencies(): cmd",
"build_lib = deps.FpgaLib(self.current_folder) sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"verilator -Wall --cc %s.sv -GTEST=2\"",
"CompilerVivado(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVivado, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib",
"super(CompilerIverilog, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): build_lib = deps.FpgaLib(self.current_folder) cmd = \"iverilog -g2012 \"",
"import pathlib class SimuCompiler(object): def __init__(self, build_dir=os.getcwd()): self.current_folder = os.getcwd() self.build_dir = build_dir",
"; run 20 ms; quit -f \\\" %s \" % self.get_module_name_from_path(top_level_file_path) return [cmd]",
"define cmd += \" +define+SIMULATION --dump-defines \" # includes for include in build_lib.get_full_include_dependencies():",
"CompilerVivado.get_module_name_from_path(top_level_file_path) cmd = \"xvlog -sv \" # define cmd += \" -d SIMULATION\"",
"%s\" % cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: print(\"ERROR: compiles went",
"cmd = \"obj_dir/V%s\" % sv_top_name return [cmd] class CompilerVivado(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVivado,",
"\" --exe %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"make -j -C obj_dir -f V%s.mk",
"for cmd in cmds: print(\"RUNING %s\" % cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir)",
"main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path) return main_name.split(\"_main\")[0] #return CompilerVerilator.get_module_name_from_path(top_level_file_path) def create_cmds_compile(self, top_level_file_path:str): cmds = []",
"create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) cmds.append(\"which vlib\") cmds.append(\"vlib work\") cmds.append(\"vmap",
"create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmd =",
"+nowarn3116 -t ps -c -do \\\"log -r /* ; run 20 ms; quit",
"print(\"RUNING %s\" % cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: raise Exception()",
"build_lib.get_full_include_dependencies(): cmd += \" +incdir+%s \" % include # files for sv_file in",
"-f \\\" %s \" % self.get_module_name_from_path(top_level_file_path) return [cmd] class CompilerVerilator(SimuCompiler): def __init__(self, build_dir=os.getcwd()):",
"\"\"\"echo \"run 20ns current_time quit\" > run.tcl \"\"\" cmds.append(cmd) cmd = \"xsim %s_sim",
"def __init__(self, build_dir=os.getcwd()): super(CompilerIverilog, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): build_lib = deps.FpgaLib(self.current_folder) cmd =",
"super(CompilerModelsim, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) cmds.append(\"which vlib\")",
"create_cmds_run(self, top_level_file_path:str): cmd = \"vvp a.out \" return [cmd] class CompilerModelsim(SimuCompiler): def __init__(self,",
"\" -D SIMULATION \" # includes for include in build_lib.get_full_include_dependencies(): cmd += \"",
"[cmd] class CompilerModelsim(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerModelsim, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds =",
"cmd += \" %s \" % sv_file cmd += \" --exe %s \"",
"a.out \" return [cmd] class CompilerModelsim(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerModelsim, self).__init__(build_dir=build_dir) def create_cmds_compile(self,",
"\" % include # files for sv_file in build_lib.get_full_file_dependencies(): cmd += \" %s",
"\" # define cmd += \" -d SIMULATION\" # includes for include in",
"top_level_file_path:str): cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" % cmd) cmd_ret",
"-d SIMULATION\" # includes for include in build_lib.get_full_include_dependencies(): cmd += \" -i %s",
"+= \" --exe %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"make -j -C obj_dir -f",
"%s_sim\" % (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path)",
"= subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: raise Exception() @staticmethod def get_module_name_from_path(file_path:str): # we",
"implement this!\") def compile(self, top_level_file_path:str): cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING",
"build_dir=os.getcwd()): super(CompilerVivado, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name",
"import os import sys import pyfgag.dependency_builder as deps import subprocess import pathlib class",
"% cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: raise Exception() @staticmethod def",
"to remove #cmd += \" +nowarn3116\" cmd = \"vsim +nowarn3116 -t ps -c",
"run 20 ms; quit -f \\\" %s \" % self.get_module_name_from_path(top_level_file_path) return [cmd] class",
"deps.FpgaLib(self.current_folder) sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"verilator -Wall --cc %s.sv -GTEST=2\" % sv_top_name",
"+= \" +define+SIMULATION --dump-defines \" # includes for include in build_lib.get_full_include_dependencies(): cmd +=",
"cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"verilator -Wall",
"\" # includes for include in build_lib.get_full_include_dependencies(): cmd += \" -I %s \"",
"= self.create_cmds_run(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" % cmd) cmd_ret = subprocess.call(cmd,",
"\" %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"xelab -debug typical %s -s %s_sim\" %",
"-j -C obj_dir -f V%s.mk V%s\" % (sv_top_name, sv_top_name) ) return cmds def",
"sv_file cmd += \" %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"xelab -debug typical %s",
"cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" % cmd) cmd_ret =",
"return cmds def create_cmds_run(self, top_level_file_path:str): # crazy warning we want to remove #cmd",
"top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def create_cmds_run(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement",
"top_level_file_path:str): sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"obj_dir/V%s\" % sv_top_name return [cmd] class CompilerVivado(SimuCompiler):",
"create_cmds_run(self, top_level_file_path:str): sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmds = [] cmd = \"\"\"echo \"run 20ns",
"\" % top_level_file_path cmds.append(cmd) cmds.append(\"make -j -C obj_dir -f V%s.mk V%s\" % (sv_top_name,",
"are logical, and the file name is the module name. basename = os.path.basename(file_path)",
"cmd += \" +define+SIMULATION \" # includes for include in build_lib.get_full_include_dependencies(): cmd +=",
"sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"obj_dir/V%s\" % sv_top_name return [cmd] class CompilerVivado(SimuCompiler): def",
"print(\"RUNING %s\" % cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: print(\"ERROR: compiles",
"\" # define cmd += \" -D SIMULATION \" # includes for include",
"-t ps -c -do \\\"log -r /* ; run 20 ms; quit -f",
"cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: raise Exception() @staticmethod def get_module_name_from_path(file_path:str): #",
"cmds.append(\"xelab -debug typical %s -s %s_sim\" % (sv_top_name, sv_top_name) ) return cmds def",
"cmd_ret: print(\"ERROR: compiles went wrong\") raise Exception() def run_simu(self, top_level_file_path:str): cmds = self.create_cmds_run(top_level_file_path=top_level_file_path)",
"= [] cmd = \"\"\"echo \"run 20ns current_time quit\" > run.tcl \"\"\" cmds.append(cmd)",
"CompilerIverilog(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerIverilog, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): build_lib = deps.FpgaLib(self.current_folder) cmd",
"include in build_lib.get_full_include_dependencies(): cmd += \" -I %s \" % include # files",
"\" % sv_file cmd += \" --exe %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"make",
"= [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmd = \"xvlog -sv \"",
"run.tcl \"\"\" cmds.append(cmd) cmd = \"xsim %s_sim -tclbatch run.tcl\" % (sv_top_name) cmds.append(cmd) return",
"\" +define+SIMULATION --dump-defines \" # includes for include in build_lib.get_full_include_dependencies(): cmd += \"",
"return main_name.split(\"_main\")[0] #return CompilerVerilator.get_module_name_from_path(top_level_file_path) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder)",
"+= \" +nowarn3116\" cmd = \"vsim +nowarn3116 -t ps -c -do \\\"log -r",
"# files for sv_file in build_lib.get_full_file_dependencies(): cmd += \" %s \" % sv_file",
"we are logical, and the file name is the module name. basename =",
"\"vlog \" # define cmd += \" +define+SIMULATION \" # includes for include",
"-g2012 \" # define cmd += \" -D SIMULATION \" # includes for",
"#cmd += \" +nowarn3116\" cmd = \"vsim +nowarn3116 -t ps -c -do \\\"log",
"def create_cmds_run(self, top_level_file_path:str): cmd = \"vvp a.out \" return [cmd] class CompilerModelsim(SimuCompiler): def",
"= build_dir pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True) def create_cmds_compile(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def",
"class CompilerModelsim(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerModelsim, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = []",
"cmd += \" %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"xelab -debug typical %s -s",
"+nowarn3116\" cmd = \"vsim +nowarn3116 -t ps -c -do \\\"log -r /* ;",
"return [cmd] class CompilerVivado(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVivado, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds",
"build_lib = deps.FpgaLib(self.current_folder) sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmd = \"xvlog -sv \" # define",
"class CompilerVerilator(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVerilator, self).__init__(build_dir=build_dir) @staticmethod def sv_top_name(top_level_file_path:str): main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path)",
"#return CompilerVerilator.get_module_name_from_path(top_level_file_path) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name =",
"pathlib class SimuCompiler(object): def __init__(self, build_dir=os.getcwd()): self.current_folder = os.getcwd() self.build_dir = build_dir pathlib.Path(self.build_dir).mkdir(parents=True,",
"+= \" %s \" % top_level_file_path print(cmd) return [cmd] def create_cmds_run(self, top_level_file_path:str): cmd",
"top_level_file_path:str): sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmds = [] cmd = \"\"\"echo \"run 20ns current_time",
"cmds.append(\"make -j -C obj_dir -f V%s.mk V%s\" % (sv_top_name, sv_top_name) ) return cmds",
"\" -i %s \" % include # files for sv_file in build_lib.get_full_file_dependencies(): cmd",
"shell=True, cwd=self.build_dir) if cmd_ret: print(\"ERROR: compiles went wrong\") raise Exception() def run_simu(self, top_level_file_path:str):",
"cmd = \"verilator -Wall --cc %s.sv -GTEST=2\" % sv_top_name # define cmd +=",
") return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"obj_dir/V%s\" %",
"# we are assuming that we are logical, and the file name is",
"\"vsim +nowarn3116 -t ps -c -do \\\"log -r /* ; run 20 ms;",
"top_level_file_path cmds.append(cmd) cmds.append(\"make -j -C obj_dir -f V%s.mk V%s\" % (sv_top_name, sv_top_name) )",
"the module name. basename = os.path.basename(file_path) return os.path.splitext(basename)[0] class CompilerIverilog(SimuCompiler): def __init__(self, build_dir=os.getcwd()):",
"+= \" -d SIMULATION\" # includes for include in build_lib.get_full_include_dependencies(): cmd += \"",
"\" % sv_file cmd += \" %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"xelab -debug",
"% cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: print(\"ERROR: compiles went wrong\")",
"define cmd += \" -D SIMULATION \" # includes for include in build_lib.get_full_include_dependencies():",
"in build_lib.get_full_file_dependencies(): cmd += \" %s \" % sv_file cmd += \" %s",
"+= \" -i %s \" % include # files for sv_file in build_lib.get_full_file_dependencies():",
"build_dir=os.getcwd()): super(CompilerModelsim, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) cmds.append(\"which",
"subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: raise Exception() @staticmethod def get_module_name_from_path(file_path:str): # we are",
"raise Exception() def run_simu(self, top_level_file_path:str): cmds = self.create_cmds_run(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING",
"cmd += \" %s \" % top_level_file_path print(cmd) return [cmd] def create_cmds_run(self, top_level_file_path:str):",
"exist_ok=True) def create_cmds_compile(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def create_cmds_run(self, top_level_file_path:str): raise",
"subprocess import pathlib class SimuCompiler(object): def __init__(self, build_dir=os.getcwd()): self.current_folder = os.getcwd() self.build_dir =",
"= CompilerVivado.get_module_name_from_path(top_level_file_path) cmds = [] cmd = \"\"\"echo \"run 20ns current_time quit\" >",
"should implement this!\") def create_cmds_run(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def compile(self,",
"remove #cmd += \" +nowarn3116\" cmd = \"vsim +nowarn3116 -t ps -c -do",
"raise NotImplementedError(\"Subclasses should implement this!\") def compile(self, top_level_file_path:str): cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path) for cmd",
"pyfgag.dependency_builder as deps import subprocess import pathlib class SimuCompiler(object): def __init__(self, build_dir=os.getcwd()): self.current_folder",
"return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"obj_dir/V%s\" % sv_top_name",
"cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: raise Exception() @staticmethod def get_module_name_from_path(file_path:str):",
"\"run 20ns current_time quit\" > run.tcl \"\"\" cmds.append(cmd) cmd = \"xsim %s_sim -tclbatch",
"% (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = self.sv_top_name(top_level_file_path) cmd",
"--dump-defines \" # includes for include in build_lib.get_full_include_dependencies(): cmd += \" +incdir+%s \"",
"cmds: print(\"RUNING %s\" % cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: print(\"ERROR:",
"= subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: print(\"ERROR: compiles went wrong\") raise Exception() def",
"= \"verilator -Wall --cc %s.sv -GTEST=2\" % sv_top_name # define cmd += \"",
"% sv_file cmd += \" %s \" % top_level_file_path cmds.append(cmd) return cmds def",
"cmd) cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir) if cmd_ret: print(\"ERROR: compiles went wrong\") raise",
"def create_cmds_compile(self, top_level_file_path:str): build_lib = deps.FpgaLib(self.current_folder) cmd = \"iverilog -g2012 \" # define",
"\" % top_level_file_path cmds.append(cmd) cmds.append(\"xelab -debug typical %s -s %s_sim\" % (sv_top_name, sv_top_name)",
"[cmd] class CompilerVerilator(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVerilator, self).__init__(build_dir=build_dir) @staticmethod def sv_top_name(top_level_file_path:str): main_name =",
"top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def compile(self, top_level_file_path:str): cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path) for",
"os.path.splitext(basename)[0] class CompilerIverilog(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerIverilog, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): build_lib =",
"= deps.FpgaLib(self.current_folder) sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"verilator -Wall --cc %s.sv -GTEST=2\" %",
"# define cmd += \" +define+SIMULATION --dump-defines \" # includes for include in",
"def run_simu(self, top_level_file_path:str): cmds = self.create_cmds_run(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" %",
"-debug typical %s -s %s_sim\" % (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self,",
"SIMULATION\" # includes for include in build_lib.get_full_include_dependencies(): cmd += \" -i %s \"",
"we are assuming that we are logical, and the file name is the",
"def __init__(self, build_dir=os.getcwd()): super(CompilerVivado, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib =",
"[cmd] class CompilerVivado(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVivado, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds =",
"# define cmd += \" +define+SIMULATION \" # includes for include in build_lib.get_full_include_dependencies():",
"create_cmds_run(self, top_level_file_path:str): # crazy warning we want to remove #cmd += \" +nowarn3116\"",
"self.sv_top_name(top_level_file_path) cmd = \"verilator -Wall --cc %s.sv -GTEST=2\" % sv_top_name # define cmd",
"cmds = self.create_cmds_run(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" % cmd) cmd_ret =",
"SimuCompiler(object): def __init__(self, build_dir=os.getcwd()): self.current_folder = os.getcwd() self.build_dir = build_dir pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True) def",
"this!\") def compile(self, top_level_file_path:str): cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\"",
"deps.FpgaLib(self.current_folder) cmd = \"iverilog -g2012 \" # define cmd += \" -D SIMULATION",
"-do \\\"log -r /* ; run 20 ms; quit -f \\\" %s \"",
"= os.getcwd() self.build_dir = build_dir pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True) def create_cmds_compile(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should",
"[cmd] def create_cmds_run(self, top_level_file_path:str): cmd = \"vvp a.out \" return [cmd] class CompilerModelsim(SimuCompiler):",
"warning we want to remove #cmd += \" +nowarn3116\" cmd = \"vsim +nowarn3116",
"\" % top_level_file_path print(cmd) return [cmd] def create_cmds_run(self, top_level_file_path:str): cmd = \"vvp a.out",
"-i %s \" % include # files for sv_file in build_lib.get_full_file_dependencies(): cmd +=",
"os.path.basename(file_path) return os.path.splitext(basename)[0] class CompilerIverilog(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerIverilog, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str):",
"ms; quit -f \\\" %s \" % self.get_module_name_from_path(top_level_file_path) return [cmd] class CompilerVerilator(SimuCompiler): def",
"we want to remove #cmd += \" +nowarn3116\" cmd = \"vsim +nowarn3116 -t",
"build_dir=os.getcwd()): super(CompilerVerilator, self).__init__(build_dir=build_dir) @staticmethod def sv_top_name(top_level_file_path:str): main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path) return main_name.split(\"_main\")[0] #return CompilerVerilator.get_module_name_from_path(top_level_file_path)",
"__init__(self, build_dir=os.getcwd()): super(CompilerVivado, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder)",
"-sv \" # define cmd += \" -d SIMULATION\" # includes for include",
"return [cmd] def create_cmds_run(self, top_level_file_path:str): cmd = \"vvp a.out \" return [cmd] class",
"# define cmd += \" -d SIMULATION\" # includes for include in build_lib.get_full_include_dependencies():",
"create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = self.sv_top_name(top_level_file_path) cmd =",
"+= \" -D SIMULATION \" # includes for include in build_lib.get_full_include_dependencies(): cmd +=",
"# define cmd += \" -D SIMULATION \" # includes for include in",
"top_level_file_path print(cmd) return [cmd] def create_cmds_run(self, top_level_file_path:str): cmd = \"vvp a.out \" return",
"cmds.append(cmd) cmds.append(\"make -j -C obj_dir -f V%s.mk V%s\" % (sv_top_name, sv_top_name) ) return",
"files for sv_file in build_lib.get_full_file_dependencies(): cmd += \" %s \" % sv_file cmd",
"self.sv_top_name(top_level_file_path) cmd = \"obj_dir/V%s\" % sv_top_name return [cmd] class CompilerVivado(SimuCompiler): def __init__(self, build_dir=os.getcwd()):",
"sv_top_name return [cmd] class CompilerVivado(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVivado, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str):",
"cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"obj_dir/V%s\" % sv_top_name return",
"self.current_folder = os.getcwd() self.build_dir = build_dir pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True) def create_cmds_compile(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses",
"+define+SIMULATION --dump-defines \" # includes for include in build_lib.get_full_include_dependencies(): cmd += \" +incdir+%s",
"% top_level_file_path cmds.append(cmd) return cmds def create_cmds_run(self, top_level_file_path:str): # crazy warning we want",
"\" % self.get_module_name_from_path(top_level_file_path) return [cmd] class CompilerVerilator(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVerilator, self).__init__(build_dir=build_dir) @staticmethod",
"= CompilerVerilator.get_module_name_from_path(top_level_file_path) return main_name.split(\"_main\")[0] #return CompilerVerilator.get_module_name_from_path(top_level_file_path) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib",
"-D SIMULATION \" # includes for include in build_lib.get_full_include_dependencies(): cmd += \" -I",
"[] build_lib = deps.FpgaLib(self.current_folder) cmds.append(\"which vlib\") cmds.append(\"vlib work\") cmds.append(\"vmap work work\") cmd =",
"cmds def create_cmds_run(self, top_level_file_path:str): # crazy warning we want to remove #cmd +=",
"def create_cmds_run(self, top_level_file_path:str): sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"obj_dir/V%s\" % sv_top_name return [cmd]",
"[] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmd = \"xvlog -sv \" #",
"build_lib.get_full_include_dependencies(): cmd += \" -I %s \" % include # files for sv_file",
"quit\" > run.tcl \"\"\" cmds.append(cmd) cmd = \"xsim %s_sim -tclbatch run.tcl\" % (sv_top_name)",
"\" % top_level_file_path cmds.append(cmd) return cmds def create_cmds_run(self, top_level_file_path:str): # crazy warning we",
"%s \" % sv_file cmd += \" %s \" % top_level_file_path cmds.append(cmd) return",
"import subprocess import pathlib class SimuCompiler(object): def __init__(self, build_dir=os.getcwd()): self.current_folder = os.getcwd() self.build_dir",
"build_lib.get_full_file_dependencies(): cmd += \" %s \" % sv_file cmd += \" --exe %s",
"%s \" % sv_file cmd += \" %s \" % top_level_file_path cmds.append(cmd) cmds.append(\"xelab",
"top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) sv_top_name = self.sv_top_name(top_level_file_path) cmd = \"verilator",
"return cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmds = [] cmd =",
"typical %s -s %s_sim\" % (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str):",
"top_level_file_path:str): build_lib = deps.FpgaLib(self.current_folder) cmd = \"iverilog -g2012 \" # define cmd +=",
"\" # includes for include in build_lib.get_full_include_dependencies(): cmd += \" +incdir+%s \" %",
"SIMULATION \" # includes for include in build_lib.get_full_include_dependencies(): cmd += \" -I %s",
"if cmd_ret: raise Exception() @staticmethod def get_module_name_from_path(file_path:str): # we are assuming that we",
"\\\" %s \" % self.get_module_name_from_path(top_level_file_path) return [cmd] class CompilerVerilator(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVerilator,",
"cmd += \" -I %s \" % include # files for sv_file in",
"compile(self, top_level_file_path:str): cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" % cmd)",
"%s \" % top_level_file_path cmds.append(cmd) cmds.append(\"xelab -debug typical %s -s %s_sim\" % (sv_top_name,",
"top_level_file_path cmds.append(cmd) cmds.append(\"xelab -debug typical %s -s %s_sim\" % (sv_top_name, sv_top_name) ) return",
"cmds def create_cmds_run(self, top_level_file_path:str): sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path) cmds = [] cmd = \"\"\"echo",
"cmds.append(cmd) return cmds def create_cmds_run(self, top_level_file_path:str): # crazy warning we want to remove",
"% top_level_file_path cmds.append(cmd) cmds.append(\"xelab -debug typical %s -s %s_sim\" % (sv_top_name, sv_top_name) )",
"self.create_cmds_compile(top_level_file_path=top_level_file_path) for cmd in cmds: print(\"RUNING %s\" % cmd) cmd_ret = subprocess.call(cmd, shell=True,",
"os.getcwd() self.build_dir = build_dir pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True) def create_cmds_compile(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement",
"get_module_name_from_path(file_path:str): # we are assuming that we are logical, and the file name",
"in build_lib.get_full_include_dependencies(): cmd += \" +incdir+%s \" % include # files for sv_file",
"self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib = deps.FpgaLib(self.current_folder) cmds.append(\"which vlib\") cmds.append(\"vlib",
"crazy warning we want to remove #cmd += \" +nowarn3116\" cmd = \"vsim",
"def __init__(self, build_dir=os.getcwd()): super(CompilerModelsim, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = [] build_lib =",
"NotImplementedError(\"Subclasses should implement this!\") def compile(self, top_level_file_path:str): cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path) for cmd in",
"\"vvp a.out \" return [cmd] class CompilerModelsim(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerModelsim, self).__init__(build_dir=build_dir) def",
"cmd_ret: raise Exception() @staticmethod def get_module_name_from_path(file_path:str): # we are assuming that we are",
"%s \" % top_level_file_path cmds.append(cmd) return cmds def create_cmds_run(self, top_level_file_path:str): # crazy warning",
"> run.tcl \"\"\" cmds.append(cmd) cmd = \"xsim %s_sim -tclbatch run.tcl\" % (sv_top_name) cmds.append(cmd)",
"cmd += \" %s \" % sv_file cmd += \" %s \" %",
"+= \" %s \" % sv_file cmd += \" %s \" % top_level_file_path",
"build_lib = deps.FpgaLib(self.current_folder) cmds.append(\"which vlib\") cmds.append(\"vlib work\") cmds.append(\"vmap work work\") cmd = \"vlog",
"obj_dir -f V%s.mk V%s\" % (sv_top_name, sv_top_name) ) return cmds def create_cmds_run(self, top_level_file_path:str):",
"cmds = [] build_lib = deps.FpgaLib(self.current_folder) cmds.append(\"which vlib\") cmds.append(\"vlib work\") cmds.append(\"vmap work work\")",
"class CompilerVivado(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerVivado, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): cmds = []",
"cmd = \"xvlog -sv \" # define cmd += \" -d SIMULATION\" #",
"cmd += \" -d SIMULATION\" # includes for include in build_lib.get_full_include_dependencies(): cmd +=",
"return os.path.splitext(basename)[0] class CompilerIverilog(SimuCompiler): def __init__(self, build_dir=os.getcwd()): super(CompilerIverilog, self).__init__(build_dir=build_dir) def create_cmds_compile(self, top_level_file_path:str): build_lib",
"implement this!\") def create_cmds_run(self, top_level_file_path:str): raise NotImplementedError(\"Subclasses should implement this!\") def compile(self, top_level_file_path:str):",
"includes for include in build_lib.get_full_include_dependencies(): cmd += \" -I %s \" % include",
"= \"\"\"echo \"run 20ns current_time quit\" > run.tcl \"\"\" cmds.append(cmd) cmd = \"xsim"
] |
[
"All Rights Reserved # -*- coding: utf-8 -*- from . import checks #",
"\".p1\", \".p2\", etc. after patching __patch__ = \"\" __date__ = \"$Date: 2021-08-07 03:05:11",
"patching __patch__ = \"\" __date__ = \"$Date: 2021-08-07 03:05:11 +1200 (Sat, 07 Aug",
"add patch number here: __version__ = \"7.36.0\" # set this to \".p1\", \".p2\",",
"here: __version__ = \"7.36.0\" # set this to \".p1\", \".p2\", etc. after patching",
"# pyflakes:ignore # Don't add patch number here: __version__ = \"7.36.0\" # set",
"2021) $\" __rev__ = \"$Rev: 19289 $\" __id__ = \"$Id: __init__.py 19289 2021-08-06",
"checks # pyflakes:ignore # Don't add patch number here: __version__ = \"7.36.0\" #",
"\"$Date: 2021-08-07 03:05:11 +1200 (Sat, 07 Aug 2021) $\" __rev__ = \"$Rev: 19289",
"07 Aug 2021) $\" __rev__ = \"$Rev: 19289 $\" __id__ = \"$Id: __init__.py",
". import checks # pyflakes:ignore # Don't add patch number here: __version__ =",
"\".p2\", etc. after patching __patch__ = \"\" __date__ = \"$Date: 2021-08-07 03:05:11 +1200",
"= \"$Rev: 19289 $\" __id__ = \"$Id: __init__.py 19289 2021-08-06 15:05:11Z <EMAIL> $\"",
"Trust 2007-2020, All Rights Reserved # -*- coding: utf-8 -*- from . import",
"pyflakes:ignore # Don't add patch number here: __version__ = \"7.36.0\" # set this",
"set this to \".p1\", \".p2\", etc. after patching __patch__ = \"\" __date__ =",
"this to \".p1\", \".p2\", etc. after patching __patch__ = \"\" __date__ = \"$Date:",
"Aug 2021) $\" __rev__ = \"$Rev: 19289 $\" __id__ = \"$Id: __init__.py 19289",
"\"7.36.0\" # set this to \".p1\", \".p2\", etc. after patching __patch__ = \"\"",
"Reserved # -*- coding: utf-8 -*- from . import checks # pyflakes:ignore #",
"from . import checks # pyflakes:ignore # Don't add patch number here: __version__",
"IETF Trust 2007-2020, All Rights Reserved # -*- coding: utf-8 -*- from .",
"Rights Reserved # -*- coding: utf-8 -*- from . import checks # pyflakes:ignore",
"after patching __patch__ = \"\" __date__ = \"$Date: 2021-08-07 03:05:11 +1200 (Sat, 07",
"__rev__ = \"$Rev: 19289 $\" __id__ = \"$Id: __init__.py 19289 2021-08-06 15:05:11Z <EMAIL>",
"\"\" __date__ = \"$Date: 2021-08-07 03:05:11 +1200 (Sat, 07 Aug 2021) $\" __rev__",
"$\" __rev__ = \"$Rev: 19289 $\" __id__ = \"$Id: __init__.py 19289 2021-08-06 15:05:11Z",
"__date__ = \"$Date: 2021-08-07 03:05:11 +1200 (Sat, 07 Aug 2021) $\" __rev__ =",
"= \"$Date: 2021-08-07 03:05:11 +1200 (Sat, 07 Aug 2021) $\" __rev__ = \"$Rev:",
"= \"\" __date__ = \"$Date: 2021-08-07 03:05:11 +1200 (Sat, 07 Aug 2021) $\"",
"2007-2020, All Rights Reserved # -*- coding: utf-8 -*- from . import checks",
"# -*- coding: utf-8 -*- from . import checks # pyflakes:ignore # Don't",
"coding: utf-8 -*- from . import checks # pyflakes:ignore # Don't add patch",
"patch number here: __version__ = \"7.36.0\" # set this to \".p1\", \".p2\", etc.",
"Copyright The IETF Trust 2007-2020, All Rights Reserved # -*- coding: utf-8 -*-",
"03:05:11 +1200 (Sat, 07 Aug 2021) $\" __rev__ = \"$Rev: 19289 $\" __id__",
"number here: __version__ = \"7.36.0\" # set this to \".p1\", \".p2\", etc. after",
"-*- coding: utf-8 -*- from . import checks # pyflakes:ignore # Don't add",
"__patch__ = \"\" __date__ = \"$Date: 2021-08-07 03:05:11 +1200 (Sat, 07 Aug 2021)",
"2021-08-07 03:05:11 +1200 (Sat, 07 Aug 2021) $\" __rev__ = \"$Rev: 19289 $\"",
"+1200 (Sat, 07 Aug 2021) $\" __rev__ = \"$Rev: 19289 $\" __id__ =",
"Don't add patch number here: __version__ = \"7.36.0\" # set this to \".p1\",",
"The IETF Trust 2007-2020, All Rights Reserved # -*- coding: utf-8 -*- from",
"= \"7.36.0\" # set this to \".p1\", \".p2\", etc. after patching __patch__ =",
"(Sat, 07 Aug 2021) $\" __rev__ = \"$Rev: 19289 $\" __id__ = \"$Id:",
"import checks # pyflakes:ignore # Don't add patch number here: __version__ = \"7.36.0\"",
"# set this to \".p1\", \".p2\", etc. after patching __patch__ = \"\" __date__",
"etc. after patching __patch__ = \"\" __date__ = \"$Date: 2021-08-07 03:05:11 +1200 (Sat,",
"# Don't add patch number here: __version__ = \"7.36.0\" # set this to",
"__version__ = \"7.36.0\" # set this to \".p1\", \".p2\", etc. after patching __patch__",
"# Copyright The IETF Trust 2007-2020, All Rights Reserved # -*- coding: utf-8",
"to \".p1\", \".p2\", etc. after patching __patch__ = \"\" __date__ = \"$Date: 2021-08-07",
"utf-8 -*- from . import checks # pyflakes:ignore # Don't add patch number",
"-*- from . import checks # pyflakes:ignore # Don't add patch number here:"
] |
[
"misc synthesis_kwargs = dict(minibatch_size=8) _Gs_cache = dict() def load_Gs(url): if url not in",
"= b1[0, 0, :] # b1[:, :] = (b1_v - avg_dlantents_b) * 0.9",
"1)) #--------------------------------------------------------------------------- # Main program. def main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) network_pkl = 'network-snapshot-010000.pkl'",
"avg_dlantents_b = Gs.get_var('dlatent_avg_b') avg_dlantents_c = Gs.get_var('dlatent_avg_c') for seed in seeds: rnd = np.random.RandomState(seed)",
"* 0.9 + avg_dlantents_b # change C for i in range(20): c =",
"None) # [seed, layer, component] c[:, :] = avg_dlantents_c for j in range(80):",
"= rnd.randn(Gs.input_shapes[0][1]) b1 = b1[np.newaxis] b1 = Gs.components.mapping_b.run(b1, None) b1_v = b1[0, 0,",
"avg_dlantents_b) * 0.9 + avg_dlantents_b # change C for i in range(20): c",
"Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) b1_v = b1[0,",
"None) b2_v = (random_b2[0, 0, :] - avg_dlantents_b) * 0.5 + avg_dlantents_b print(b2_v.shape)",
"* 0.7 + avg_dlantents_c current_png = png + '/seedc_%d_%d' % (seed, i) +",
"0.7 + avg_dlantents_c current_png = png + '/seedc_%d_%d' % (seed, i) + '.png'",
"in range(80): random_b2 = rnd.randn(Gs.input_shapes[0][1]) random_b2 = random_b2[np.newaxis] random_b2 = Gs.components.mapping_b.run(random_b2, None) b2_v",
"'/seedc_%d_%d' % (seed, i) + '.png' gen = Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen,",
"- a1 / a2 * b1_v print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v * b2_v)) for i",
"synthesis_kwargs = dict(minibatch_size=8) _Gs_cache = dict() def load_Gs(url): if url not in _Gs_cache:",
"png + '/seedb%d_%d_%d' % (seed, j, i) + '.png' gen = Gs.components.synthesis.run(tmp, c,",
"grid_size=(1, 1)) b1_v = b1[0, 0, :] c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis]",
"b1_v print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v * b2_v)) for i in range(10): tmp = np.empty_like(b1)",
"main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) network_pkl = 'network-snapshot-010000.pkl' G, D, Gs = misc.load_pkl(network_pkl) draw_figure(config.result_dir,",
"# [seed, layer, component] c_v = c[0, 0, :] c[:, :] = (c_v",
"0.5 + avg_dlantents_b print(b2_v.shape) # gram-schmidt process a1 = np.sum(b1_v * b2_v, dtype=np.float32)",
"+ 0.1 * i * b2_v current_png = png + '/seedb%d_%d_%d' % (seed,",
"range(10): tmp = np.empty_like(b1) tmp[:, :] = b1_v + 0.1 * i *",
"component] c[:, :] = avg_dlantents_c for j in range(80): random_b2 = rnd.randn(Gs.input_shapes[0][1]) random_b2",
"os.makedirs(config.result_dir, exist_ok=True) network_pkl = 'network-snapshot-010000.pkl' G, D, Gs = misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs, seeds",
"a1 / a2 * b1_v print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v * b2_v)) for i in",
"for i in range(20): c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c,",
"= Gs.components.mapping_c.run(c, None) # [seed, layer, component] c[:, :] = avg_dlantents_c for j",
"seeds): avg_dlantents_b = Gs.get_var('dlatent_avg_b') avg_dlantents_c = Gs.get_var('dlatent_avg_c') for seed in seeds: rnd =",
"'.png' gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1))",
"rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c_v",
"rnd.randn(Gs.input_shapes[0][1]) b1 = b1[np.newaxis] b1 = Gs.components.mapping_b.run(b1, None) b1_v = b1[0, 0, :]",
"PIL.Image import dnnlib import dnnlib.tflib as tflib import config from training import misc",
"with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G, _D, Gs = pickle.load(f) _Gs_cache[url] = Gs",
"= np.sum(b1_v * b1_v, dtype=np.float32) print(a1) print(a2) b2_v = b2_v - a1 /",
"print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v * b2_v)) for i in range(10): tmp = np.empty_like(b1) tmp[:,",
"url not in _Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G, _D, Gs =",
"_Gs_cache = dict() def load_Gs(url): if url not in _Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir)",
"= c[0, 0, :] c[:, :] = (c_v - avg_dlantents_c) * 0.7 +",
"1], grid_size=(1, 1)) b1_v = b1[0, 0, :] c = rnd.randn(Gs.input_shapes[1][1]) c =",
"pickle.load(f) _Gs_cache[url] = Gs return _Gs_cache[url] def draw_figure(png, Gs, seeds): avg_dlantents_b = Gs.get_var('dlatent_avg_b')",
"b1_v, dtype=np.float32) print(a1) print(a2) b2_v = b2_v - a1 / a2 * b1_v",
"c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) b1_v = b1[0, 0,",
"config from training import misc synthesis_kwargs = dict(minibatch_size=8) _Gs_cache = dict() def load_Gs(url):",
"0, :] - avg_dlantents_b) * 0.5 + avg_dlantents_b print(b2_v.shape) # gram-schmidt process a1",
"random_b2 = Gs.components.mapping_b.run(random_b2, None) b2_v = (random_b2[0, 0, :] - avg_dlantents_b) * 0.5",
"% (seed, i) + '.png' gen = Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png,",
"current_png = png + '/seedc_%d_%d' % (seed, i) + '.png' gen = Gs.components.synthesis.run(b1,",
"= b1[0, 0, :] c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c,",
":] c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed,",
"/ a2 * b1_v print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v * b2_v)) for i in range(10):",
"np.empty_like(b1) tmp[:, :] = b1_v + 0.1 * i * b2_v current_png =",
"+ '/seedb%d_%d_%d' % (seed, j, i) + '.png' gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False,",
"'network-snapshot-010000.pkl' G, D, Gs = misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs, seeds = [2, 7, 8,",
"= random_b2[np.newaxis] random_b2 = Gs.components.mapping_b.run(random_b2, None) b2_v = (random_b2[0, 0, :] - avg_dlantents_b)",
"(c_v - avg_dlantents_c) * 0.7 + avg_dlantents_c current_png = png + '/seedc_%d_%d' %",
"**synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) b1_v = b1[0, 0, :] c",
"c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c[:, :]",
"layer, component] c[:, :] = avg_dlantents_c for j in range(80): random_b2 = rnd.randn(Gs.input_shapes[0][1])",
"range(20): c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed,",
"misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) b1_v = b1[0, 0, :] c =",
"- avg_dlantents_c) * 0.7 + avg_dlantents_c current_png = png + '/seedc_%d_%d' % (seed,",
"= (b1_v - avg_dlantents_b) * 0.9 + avg_dlantents_b # change C for i",
"# Main program. def main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) network_pkl = 'network-snapshot-010000.pkl' G, D,",
"pickle import numpy as np import PIL.Image import dnnlib import dnnlib.tflib as tflib",
"as tflib import config from training import misc synthesis_kwargs = dict(minibatch_size=8) _Gs_cache =",
"= Gs.get_var('dlatent_avg_b') avg_dlantents_c = Gs.get_var('dlatent_avg_c') for seed in seeds: rnd = np.random.RandomState(seed) b1",
"= rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer, component]",
"'/seedb%d_%d_%d' % (seed, j, i) + '.png' gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1]",
"+ avg_dlantents_c current_png = png + '/seedc_%d_%d' % (seed, i) + '.png' gen",
"_Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G, _D, Gs = pickle.load(f) _Gs_cache[url] =",
"draw_figure(png, Gs, seeds): avg_dlantents_b = Gs.get_var('dlatent_avg_b') avg_dlantents_c = Gs.get_var('dlatent_avg_c') for seed in seeds:",
"= dict(minibatch_size=8) _Gs_cache = dict() def load_Gs(url): if url not in _Gs_cache: with",
"if url not in _Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G, _D, Gs",
"in range(20): c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) #",
"png + '/seedc_%d_%d' % (seed, i) + '.png' gen = Gs.components.synthesis.run(b1, c, randomize_noise=False,",
"draw_figure(config.result_dir, Gs, seeds = [2, 7, 8, 11, 23]) #---------------------------------------------------------------------------- if __name__ ==",
"= Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) b1_v =",
"0, :] c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) #",
"c[:, :] = (c_v - avg_dlantents_c) * 0.7 + avg_dlantents_c current_png = png",
"0, :] # b1[:, :] = (b1_v - avg_dlantents_b) * 0.9 + avg_dlantents_b",
"* i * b2_v current_png = png + '/seedb%d_%d_%d' % (seed, j, i)",
"for j in range(80): random_b2 = rnd.randn(Gs.input_shapes[0][1]) random_b2 = random_b2[np.newaxis] random_b2 = Gs.components.mapping_b.run(random_b2,",
"a1 = np.sum(b1_v * b2_v, dtype=np.float32) a2 = np.sum(b1_v * b1_v, dtype=np.float32) print(a1)",
"np.random.RandomState(seed) b1 = rnd.randn(Gs.input_shapes[0][1]) b1 = b1[np.newaxis] b1 = Gs.components.mapping_b.run(b1, None) b1_v =",
"c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c[:, :] = avg_dlantents_c for",
"Main program. def main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) network_pkl = 'network-snapshot-010000.pkl' G, D, Gs",
"Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) #--------------------------------------------------------------------------- # Main",
"tmp[:, :] = b1_v + 0.1 * i * b2_v current_png = png",
"= 'network-snapshot-010000.pkl' G, D, Gs = misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs, seeds = [2, 7,",
"misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs, seeds = [2, 7, 8, 11, 23]) #---------------------------------------------------------------------------- if __name__",
"= (random_b2[0, 0, :] - avg_dlantents_b) * 0.5 + avg_dlantents_b print(b2_v.shape) # gram-schmidt",
"= Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) #--------------------------------------------------------------------------- #",
"import misc synthesis_kwargs = dict(minibatch_size=8) _Gs_cache = dict() def load_Gs(url): if url not",
":] = (b1_v - avg_dlantents_b) * 0.9 + avg_dlantents_b # change C for",
"* b2_v current_png = png + '/seedb%d_%d_%d' % (seed, j, i) + '.png'",
"avg_dlantents_c) * 0.7 + avg_dlantents_c current_png = png + '/seedc_%d_%d' % (seed, i)",
"= png + '/seedb%d_%d_%d' % (seed, j, i) + '.png' gen = Gs.components.synthesis.run(tmp,",
"np.sum(b1_v * b1_v, dtype=np.float32) print(a1) print(a2) b2_v = b2_v - a1 / a2",
":] c[:, :] = (c_v - avg_dlantents_c) * 0.7 + avg_dlantents_c current_png =",
"c_v = c[0, 0, :] c[:, :] = (c_v - avg_dlantents_c) * 0.7",
"in range(10): tmp = np.empty_like(b1) tmp[:, :] = b1_v + 0.1 * i",
"(seed, j, i) + '.png' gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png,",
"tmp = np.empty_like(b1) tmp[:, :] = b1_v + 0.1 * i * b2_v",
"rnd = np.random.RandomState(seed) b1 = rnd.randn(Gs.input_shapes[0][1]) b1 = b1[np.newaxis] b1 = Gs.components.mapping_b.run(b1, None)",
"= rnd.randn(Gs.input_shapes[0][1]) random_b2 = random_b2[np.newaxis] random_b2 = Gs.components.mapping_b.run(random_b2, None) b2_v = (random_b2[0, 0,",
"Gs, seeds = [2, 7, 8, 11, 23]) #---------------------------------------------------------------------------- if __name__ == \"__main__\":",
"gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) #---------------------------------------------------------------------------",
"tflib import config from training import misc synthesis_kwargs = dict(minibatch_size=8) _Gs_cache = dict()",
"b1[:, :] = (b1_v - avg_dlantents_b) * 0.9 + avg_dlantents_b # change C",
"= Gs return _Gs_cache[url] def draw_figure(png, Gs, seeds): avg_dlantents_b = Gs.get_var('dlatent_avg_b') avg_dlantents_c =",
"None) b1_v = b1[0, 0, :] # b1[:, :] = (b1_v - avg_dlantents_b)",
"Gs.components.mapping_c.run(c, None) # [seed, layer, component] c_v = c[0, 0, :] c[:, :]",
"+ '.png' gen = Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1,",
"for i in range(10): tmp = np.empty_like(b1) tmp[:, :] = b1_v + 0.1",
"import config from training import misc synthesis_kwargs = dict(minibatch_size=8) _Gs_cache = dict() def",
"from training import misc synthesis_kwargs = dict(minibatch_size=8) _Gs_cache = dict() def load_Gs(url): if",
"import pickle import numpy as np import PIL.Image import dnnlib import dnnlib.tflib as",
":] = avg_dlantents_c for j in range(80): random_b2 = rnd.randn(Gs.input_shapes[0][1]) random_b2 = random_b2[np.newaxis]",
"(seed, i) + '.png' gen = Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1,",
"i) + '.png' gen = Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1],",
"network_pkl = 'network-snapshot-010000.pkl' G, D, Gs = misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs, seeds = [2,",
"= c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c_v = c[0,",
"= b2_v - a1 / a2 * b1_v print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v * b2_v))",
"exist_ok=True) network_pkl = 'network-snapshot-010000.pkl' G, D, Gs = misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs, seeds =",
"= Gs.components.mapping_c.run(c, None) # [seed, layer, component] c_v = c[0, 0, :] c[:,",
"#--------------------------------------------------------------------------- # Main program. def main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) network_pkl = 'network-snapshot-010000.pkl' G,",
"seeds: rnd = np.random.RandomState(seed) b1 = rnd.randn(Gs.input_shapes[0][1]) b1 = b1[np.newaxis] b1 = Gs.components.mapping_b.run(b1,",
"random_b2 = rnd.randn(Gs.input_shapes[0][1]) random_b2 = random_b2[np.newaxis] random_b2 = Gs.components.mapping_b.run(random_b2, None) b2_v = (random_b2[0,",
"- avg_dlantents_b) * 0.9 + avg_dlantents_b # change C for i in range(20):",
"# [seed, layer, component] c[:, :] = avg_dlantents_c for j in range(80): random_b2",
"i * b2_v current_png = png + '/seedb%d_%d_%d' % (seed, j, i) +",
"training import misc synthesis_kwargs = dict(minibatch_size=8) _Gs_cache = dict() def load_Gs(url): if url",
"as np import PIL.Image import dnnlib import dnnlib.tflib as tflib import config from",
"+ '/seedc_%d_%d' % (seed, i) + '.png' gen = Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1]",
"j, i) + '.png' gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1,",
"dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G, _D, Gs = pickle.load(f) _Gs_cache[url] = Gs return",
"avg_dlantents_c for j in range(80): random_b2 = rnd.randn(Gs.input_shapes[0][1]) random_b2 = random_b2[np.newaxis] random_b2 =",
"layer, component] c_v = c[0, 0, :] c[:, :] = (c_v - avg_dlantents_c)",
"= Gs.get_var('dlatent_avg_c') for seed in seeds: rnd = np.random.RandomState(seed) b1 = rnd.randn(Gs.input_shapes[0][1]) b1",
"avg_dlantents_c current_png = png + '/seedc_%d_%d' % (seed, i) + '.png' gen =",
"c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c[:, :] = avg_dlantents_c",
"change C for i in range(20): c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c",
"process a1 = np.sum(b1_v * b2_v, dtype=np.float32) a2 = np.sum(b1_v * b1_v, dtype=np.float32)",
"misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) #--------------------------------------------------------------------------- # Main program. def main(): tflib.init_tf()",
"b1 = rnd.randn(Gs.input_shapes[0][1]) b1 = b1[np.newaxis] b1 = Gs.components.mapping_b.run(b1, None) b1_v = b1[0,",
"dnnlib.tflib as tflib import config from training import misc synthesis_kwargs = dict(minibatch_size=8) _Gs_cache",
"i in range(20): c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None)",
"print(a2) b2_v = b2_v - a1 / a2 * b1_v print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v",
"+ avg_dlantents_b print(b2_v.shape) # gram-schmidt process a1 = np.sum(b1_v * b2_v, dtype=np.float32) a2",
"b1_v + 0.1 * i * b2_v current_png = png + '/seedb%d_%d_%d' %",
"0.9 + avg_dlantents_b # change C for i in range(20): c = rnd.randn(Gs.input_shapes[1][1])",
"dict(minibatch_size=8) _Gs_cache = dict() def load_Gs(url): if url not in _Gs_cache: with dnnlib.util.open_url(url,",
"np import PIL.Image import dnnlib import dnnlib.tflib as tflib import config from training",
"import dnnlib import dnnlib.tflib as tflib import config from training import misc synthesis_kwargs",
"* b2_v)) for i in range(10): tmp = np.empty_like(b1) tmp[:, :] = b1_v",
"cache_dir=config.cache_dir) as f: _G, _D, Gs = pickle.load(f) _Gs_cache[url] = Gs return _Gs_cache[url]",
"b1 = b1[np.newaxis] b1 = Gs.components.mapping_b.run(b1, None) b1_v = b1[0, 0, :] #",
"= b1[np.newaxis] b1 = Gs.components.mapping_b.run(b1, None) b1_v = b1[0, 0, :] # b1[:,",
"in _Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G, _D, Gs = pickle.load(f) _Gs_cache[url]",
"random_b2[np.newaxis] random_b2 = Gs.components.mapping_b.run(random_b2, None) b2_v = (random_b2[0, 0, :] - avg_dlantents_b) *",
"_Gs_cache[url] = Gs return _Gs_cache[url] def draw_figure(png, Gs, seeds): avg_dlantents_b = Gs.get_var('dlatent_avg_b') avg_dlantents_c",
"component] c_v = c[0, 0, :] c[:, :] = (c_v - avg_dlantents_c) *",
"np.sum(b1_v * b2_v, dtype=np.float32) a2 = np.sum(b1_v * b1_v, dtype=np.float32) print(a1) print(a2) b2_v",
"b2_v, dtype=np.float32) a2 = np.sum(b1_v * b1_v, dtype=np.float32) print(a1) print(a2) b2_v = b2_v",
"= (c_v - avg_dlantents_c) * 0.7 + avg_dlantents_c current_png = png + '/seedc_%d_%d'",
"b1_v = b1[0, 0, :] c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c =",
"= png + '/seedc_%d_%d' % (seed, i) + '.png' gen = Gs.components.synthesis.run(b1, c,",
"# gram-schmidt process a1 = np.sum(b1_v * b2_v, dtype=np.float32) a2 = np.sum(b1_v *",
"i) + '.png' gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1],",
"grid_size=(1, 1)) #--------------------------------------------------------------------------- # Main program. def main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) network_pkl =",
"+ avg_dlantents_b # change C for i in range(20): c = rnd.randn(Gs.input_shapes[1][1]) c",
"_G, _D, Gs = pickle.load(f) _Gs_cache[url] = Gs return _Gs_cache[url] def draw_figure(png, Gs,",
"a2 * b1_v print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v * b2_v)) for i in range(10): tmp",
"D, Gs = misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs, seeds = [2, 7, 8, 11, 23])",
"randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) b1_v = b1[0, 0, :]",
"= np.empty_like(b1) tmp[:, :] = b1_v + 0.1 * i * b2_v current_png",
"dtype=np.float32) a2 = np.sum(b1_v * b1_v, dtype=np.float32) print(a1) print(a2) b2_v = b2_v -",
"gram-schmidt process a1 = np.sum(b1_v * b2_v, dtype=np.float32) a2 = np.sum(b1_v * b1_v,",
"seeds = [2, 7, 8, 11, 23]) #---------------------------------------------------------------------------- if __name__ == \"__main__\": main()",
"= pickle.load(f) _Gs_cache[url] = Gs return _Gs_cache[url] def draw_figure(png, Gs, seeds): avg_dlantents_b =",
"[seed, layer, component] c[:, :] = avg_dlantents_c for j in range(80): random_b2 =",
"= dict() def load_Gs(url): if url not in _Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as",
"import numpy as np import PIL.Image import dnnlib import dnnlib.tflib as tflib import",
"rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c[:,",
"+ '.png' gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1,",
"dtype=np.float32) print(a1) print(a2) b2_v = b2_v - a1 / a2 * b1_v print(b1_v.shape)",
"None) # [seed, layer, component] c_v = c[0, 0, :] c[:, :] =",
"range(80): random_b2 = rnd.randn(Gs.input_shapes[0][1]) random_b2 = random_b2[np.newaxis] random_b2 = Gs.components.mapping_b.run(random_b2, None) b2_v =",
"* b1_v, dtype=np.float32) print(a1) print(a2) b2_v = b2_v - a1 / a2 *",
"a2 = np.sum(b1_v * b1_v, dtype=np.float32) print(a1) print(a2) b2_v = b2_v - a1",
"_D, Gs = pickle.load(f) _Gs_cache[url] = Gs return _Gs_cache[url] def draw_figure(png, Gs, seeds):",
"b1[np.newaxis] b1 = Gs.components.mapping_b.run(b1, None) b1_v = b1[0, 0, :] # b1[:, :]",
"import PIL.Image import dnnlib import dnnlib.tflib as tflib import config from training import",
"in seeds: rnd = np.random.RandomState(seed) b1 = rnd.randn(Gs.input_shapes[0][1]) b1 = b1[np.newaxis] b1 =",
"Gs.components.mapping_b.run(random_b2, None) b2_v = (random_b2[0, 0, :] - avg_dlantents_b) * 0.5 + avg_dlantents_b",
"print(np.sum(b1_v * b2_v)) for i in range(10): tmp = np.empty_like(b1) tmp[:, :] =",
"* b1_v print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v * b2_v)) for i in range(10): tmp =",
"dnnlib import dnnlib.tflib as tflib import config from training import misc synthesis_kwargs =",
"avg_dlantents_b print(b2_v.shape) # gram-schmidt process a1 = np.sum(b1_v * b2_v, dtype=np.float32) a2 =",
"0.1 * i * b2_v current_png = png + '/seedb%d_%d_%d' % (seed, j,",
"G, D, Gs = misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs, seeds = [2, 7, 8, 11,",
"Gs = misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs, seeds = [2, 7, 8, 11, 23]) #----------------------------------------------------------------------------",
"* 0.5 + avg_dlantents_b print(b2_v.shape) # gram-schmidt process a1 = np.sum(b1_v * b2_v,",
"= np.random.RandomState(seed) b1 = rnd.randn(Gs.input_shapes[0][1]) b1 = b1[np.newaxis] b1 = Gs.components.mapping_b.run(b1, None) b1_v",
"= avg_dlantents_c for j in range(80): random_b2 = rnd.randn(Gs.input_shapes[0][1]) random_b2 = random_b2[np.newaxis] random_b2",
"= Gs.components.mapping_b.run(random_b2, None) b2_v = (random_b2[0, 0, :] - avg_dlantents_b) * 0.5 +",
"c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c_v = c[0, 0, :]",
"dict() def load_Gs(url): if url not in _Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:",
"seed in seeds: rnd = np.random.RandomState(seed) b1 = rnd.randn(Gs.input_shapes[0][1]) b1 = b1[np.newaxis] b1",
"c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer,",
"current_png, drange=[-1, 1], grid_size=(1, 1)) #--------------------------------------------------------------------------- # Main program. def main(): tflib.init_tf() os.makedirs(config.result_dir,",
"Gs.components.mapping_b.run(b1, None) b1_v = b1[0, 0, :] # b1[:, :] = (b1_v -",
"[seed, layer, component] c_v = c[0, 0, :] c[:, :] = (c_v -",
"os import pickle import numpy as np import PIL.Image import dnnlib import dnnlib.tflib",
"return _Gs_cache[url] def draw_figure(png, Gs, seeds): avg_dlantents_b = Gs.get_var('dlatent_avg_b') avg_dlantents_c = Gs.get_var('dlatent_avg_c') for",
":] = (c_v - avg_dlantents_c) * 0.7 + avg_dlantents_c current_png = png +",
"program. def main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) network_pkl = 'network-snapshot-010000.pkl' G, D, Gs =",
"b2_v - a1 / a2 * b1_v print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v * b2_v)) for",
"def load_Gs(url): if url not in _Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G,",
":] # b1[:, :] = (b1_v - avg_dlantents_b) * 0.9 + avg_dlantents_b #",
"numpy as np import PIL.Image import dnnlib import dnnlib.tflib as tflib import config",
"print(b2_v.shape) # gram-schmidt process a1 = np.sum(b1_v * b2_v, dtype=np.float32) a2 = np.sum(b1_v",
"for seed in seeds: rnd = np.random.RandomState(seed) b1 = rnd.randn(Gs.input_shapes[0][1]) b1 = b1[np.newaxis]",
"= Gs.components.mapping_b.run(b1, None) b1_v = b1[0, 0, :] # b1[:, :] = (b1_v",
"current_png = png + '/seedb%d_%d_%d' % (seed, j, i) + '.png' gen =",
"drange=[-1, 1], grid_size=(1, 1)) b1_v = b1[0, 0, :] c = rnd.randn(Gs.input_shapes[1][1]) c",
"load_Gs(url): if url not in _Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G, _D,",
"= misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs, seeds = [2, 7, 8, 11, 23]) #---------------------------------------------------------------------------- if",
"- avg_dlantents_b) * 0.5 + avg_dlantents_b print(b2_v.shape) # gram-schmidt process a1 = np.sum(b1_v",
"drange=[-1, 1], grid_size=(1, 1)) #--------------------------------------------------------------------------- # Main program. def main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True)",
"**synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) #--------------------------------------------------------------------------- # Main program. def main():",
"# change C for i in range(20): c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis]",
"(b1_v - avg_dlantents_b) * 0.9 + avg_dlantents_b # change C for i in",
"b1_v = b1[0, 0, :] # b1[:, :] = (b1_v - avg_dlantents_b) *",
"% (seed, j, i) + '.png' gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen,",
"not in _Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G, _D, Gs = pickle.load(f)",
"avg_dlantents_c = Gs.get_var('dlatent_avg_c') for seed in seeds: rnd = np.random.RandomState(seed) b1 = rnd.randn(Gs.input_shapes[0][1])",
"(random_b2[0, 0, :] - avg_dlantents_b) * 0.5 + avg_dlantents_b print(b2_v.shape) # gram-schmidt process",
"* b2_v, dtype=np.float32) a2 = np.sum(b1_v * b1_v, dtype=np.float32) print(a1) print(a2) b2_v =",
"print(a1) print(a2) b2_v = b2_v - a1 / a2 * b1_v print(b1_v.shape) print(b2_v.shape)",
"Gs.components.mapping_c.run(c, None) # [seed, layer, component] c[:, :] = avg_dlantents_c for j in",
"c[:, :] = avg_dlantents_c for j in range(80): random_b2 = rnd.randn(Gs.input_shapes[0][1]) random_b2 =",
"j in range(80): random_b2 = rnd.randn(Gs.input_shapes[0][1]) random_b2 = random_b2[np.newaxis] random_b2 = Gs.components.mapping_b.run(random_b2, None)",
"b2_v)) for i in range(10): tmp = np.empty_like(b1) tmp[:, :] = b1_v +",
"gen = Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) b1_v",
"b1[0, 0, :] c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None)",
":] = b1_v + 0.1 * i * b2_v current_png = png +",
"randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) #--------------------------------------------------------------------------- # Main program. def",
"avg_dlantents_b) * 0.5 + avg_dlantents_b print(b2_v.shape) # gram-schmidt process a1 = np.sum(b1_v *",
"1], grid_size=(1, 1)) #--------------------------------------------------------------------------- # Main program. def main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) network_pkl",
"c[0, 0, :] c[:, :] = (c_v - avg_dlantents_c) * 0.7 + avg_dlantents_c",
"# b1[:, :] = (b1_v - avg_dlantents_b) * 0.9 + avg_dlantents_b # change",
"current_png, drange=[-1, 1], grid_size=(1, 1)) b1_v = b1[0, 0, :] c = rnd.randn(Gs.input_shapes[1][1])",
"= c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c[:, :] =",
"b2_v current_png = png + '/seedb%d_%d_%d' % (seed, j, i) + '.png' gen",
"Gs.get_var('dlatent_avg_c') for seed in seeds: rnd = np.random.RandomState(seed) b1 = rnd.randn(Gs.input_shapes[0][1]) b1 =",
"= b1_v + 0.1 * i * b2_v current_png = png + '/seedb%d_%d_%d'",
"import dnnlib.tflib as tflib import config from training import misc synthesis_kwargs = dict(minibatch_size=8)",
"1)) b1_v = b1[0, 0, :] c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c",
"0, :] c[:, :] = (c_v - avg_dlantents_c) * 0.7 + avg_dlantents_c current_png",
"'.png' gen = Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1))",
"C for i in range(20): c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c =",
"b2_v = b2_v - a1 / a2 * b1_v print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v *",
"b2_v = (random_b2[0, 0, :] - avg_dlantents_b) * 0.5 + avg_dlantents_b print(b2_v.shape) #",
":] - avg_dlantents_b) * 0.5 + avg_dlantents_b print(b2_v.shape) # gram-schmidt process a1 =",
"Gs = pickle.load(f) _Gs_cache[url] = Gs return _Gs_cache[url] def draw_figure(png, Gs, seeds): avg_dlantents_b",
"random_b2 = random_b2[np.newaxis] random_b2 = Gs.components.mapping_b.run(random_b2, None) b2_v = (random_b2[0, 0, :] -",
"def main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) network_pkl = 'network-snapshot-010000.pkl' G, D, Gs = misc.load_pkl(network_pkl)",
"as f: _G, _D, Gs = pickle.load(f) _Gs_cache[url] = Gs return _Gs_cache[url] def",
"c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c_v = c[0, 0,",
"= np.sum(b1_v * b2_v, dtype=np.float32) a2 = np.sum(b1_v * b1_v, dtype=np.float32) print(a1) print(a2)",
"Gs return _Gs_cache[url] def draw_figure(png, Gs, seeds): avg_dlantents_b = Gs.get_var('dlatent_avg_b') avg_dlantents_c = Gs.get_var('dlatent_avg_c')",
"print(b2_v.shape) print(np.sum(b1_v * b2_v)) for i in range(10): tmp = np.empty_like(b1) tmp[:, :]",
"_Gs_cache[url] def draw_figure(png, Gs, seeds): avg_dlantents_b = Gs.get_var('dlatent_avg_b') avg_dlantents_c = Gs.get_var('dlatent_avg_c') for seed",
"import os import pickle import numpy as np import PIL.Image import dnnlib import",
"= [2, 7, 8, 11, 23]) #---------------------------------------------------------------------------- if __name__ == \"__main__\": main() #----------------------------------------------------------------------------",
"avg_dlantents_b # change C for i in range(20): c = rnd.randn(Gs.input_shapes[1][1]) c =",
"Gs.get_var('dlatent_avg_b') avg_dlantents_c = Gs.get_var('dlatent_avg_c') for seed in seeds: rnd = np.random.RandomState(seed) b1 =",
"c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) #--------------------------------------------------------------------------- # Main program.",
"rnd.randn(Gs.input_shapes[0][1]) random_b2 = random_b2[np.newaxis] random_b2 = Gs.components.mapping_b.run(random_b2, None) b2_v = (random_b2[0, 0, :]",
"i in range(10): tmp = np.empty_like(b1) tmp[:, :] = b1_v + 0.1 *",
"b1 = Gs.components.mapping_b.run(b1, None) b1_v = b1[0, 0, :] # b1[:, :] =",
"def draw_figure(png, Gs, seeds): avg_dlantents_b = Gs.get_var('dlatent_avg_b') avg_dlantents_c = Gs.get_var('dlatent_avg_c') for seed in",
"f: _G, _D, Gs = pickle.load(f) _Gs_cache[url] = Gs return _Gs_cache[url] def draw_figure(png,",
"c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c_v =",
"Gs, seeds): avg_dlantents_b = Gs.get_var('dlatent_avg_b') avg_dlantents_c = Gs.get_var('dlatent_avg_c') for seed in seeds: rnd",
"b1[0, 0, :] # b1[:, :] = (b1_v - avg_dlantents_b) * 0.9 +",
"tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) network_pkl = 'network-snapshot-010000.pkl' G, D, Gs = misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs,"
] |
[
"sys for line in sys.stdin: if line == \"0.00\\n\": break; c = float(line.strip())",
"\"0.00\\n\": break; c = float(line.strip()) (curr_n, curr_len) = (0, 0.0) while c >",
"float(line.strip()) (curr_n, curr_len) = (0, 0.0) while c > curr_len: curr_n += 1",
"line == \"0.00\\n\": break; c = float(line.strip()) (curr_n, curr_len) = (0, 0.0) while",
"(0, 0.0) while c > curr_len: curr_n += 1 curr_len += 1.0/(1 +",
"= float(line.strip()) (curr_n, curr_len) = (0, 0.0) while c > curr_len: curr_n +=",
"while c > curr_len: curr_n += 1 curr_len += 1.0/(1 + curr_n) print",
"if line == \"0.00\\n\": break; c = float(line.strip()) (curr_n, curr_len) = (0, 0.0)",
"c > curr_len: curr_n += 1 curr_len += 1.0/(1 + curr_n) print curr_n,",
"for line in sys.stdin: if line == \"0.00\\n\": break; c = float(line.strip()) (curr_n,",
"in sys.stdin: if line == \"0.00\\n\": break; c = float(line.strip()) (curr_n, curr_len) =",
"sys.stdin: if line == \"0.00\\n\": break; c = float(line.strip()) (curr_n, curr_len) = (0,",
"(curr_n, curr_len) = (0, 0.0) while c > curr_len: curr_n += 1 curr_len",
"c = float(line.strip()) (curr_n, curr_len) = (0, 0.0) while c > curr_len: curr_n",
"== \"0.00\\n\": break; c = float(line.strip()) (curr_n, curr_len) = (0, 0.0) while c",
"break; c = float(line.strip()) (curr_n, curr_len) = (0, 0.0) while c > curr_len:",
"0.0) while c > curr_len: curr_n += 1 curr_len += 1.0/(1 + curr_n)",
"line in sys.stdin: if line == \"0.00\\n\": break; c = float(line.strip()) (curr_n, curr_len)",
"import sys for line in sys.stdin: if line == \"0.00\\n\": break; c =",
"curr_len) = (0, 0.0) while c > curr_len: curr_n += 1 curr_len +=",
"> curr_len: curr_n += 1 curr_len += 1.0/(1 + curr_n) print curr_n, \"card(s)\"",
"= (0, 0.0) while c > curr_len: curr_n += 1 curr_len += 1.0/(1"
] |
[
"ACCEL_CONFIG, 0b00100000) # setzt den Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100) # entspricht dem Wert",
"in range(0, 200): init_data.append(self.read_raw()) offset = np.array(init_data) print(\"finished calc..\") #print(\"offset:\",offset) return np.median(offset, axis=0)",
"+ 1) value = (h << 8) + l return value def read_word_2c(self,",
"range(0, 200): init_data.append(self.read_raw()) offset = np.array(init_data) print(\"finished calc..\") #print(\"offset:\",offset) return np.median(offset, axis=0) def",
"if (val >= 0x8000): return -((65535 - val) + 1) else: return val",
"beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0 gyroskop_zout_skaliert = gyroskop_zout / 131 return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert,",
"= imu.run() imu.debug.excecute(t_ref) except KeyboardInterrupt: break if save: imu.debug.save() if draw: imu.debug.draw() return",
"init_data = [] print(\"offset calc start...\") for count in range(0, 200): init_data.append(self.read_raw()) offset",
"power mode # bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disabele=1, disabled accel_z, gyro_x bis _z #",
"Q = np.array([[q, q, 0], [q, q, 0], [0, 0, 0]]) # process",
"via i2cdetect power_mgmt_1 = 0x6b ACCEL_CONFIG = 0x1C # Reg 28 ACCEL_CONFIG2 =",
"count in range(0, 200): init_data.append(self.read_raw()) offset = np.array(init_data) print(\"finished calc..\") #print(\"offset:\",offset) return np.median(offset,",
"# Aktivieren, um das Modul ansprechen zu koennen bus.write_byte_data(address, power_mgmt_1, 0) # full",
"müsste das mittlere die geschwindigkeit sein np.dot(self.kf.H, self.kf.predict()) self.kf.update(z) #print(\"kalmanfilter: \", self.kf.x[0], self.kf.x[1],",
"= self.read_word(reg) if (val >= 0x8000): return -((65535 - val) + 1) else:",
"# bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disabele=1, disabled accel_z, gyro_x bis _z # setzt Accelerometer",
"return -((65535 - val) + 1) else: return val def read_raw(self): if self.sim_mode",
"- val) + 1) else: return val def read_raw(self): if self.sim_mode == True:",
"gyroskop_zout_skaliert def offset_calc(self): init_data = [] print(\"offset calc start...\") for count in range(0,",
"sensor documentation beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0 gyroskop_zout_skaliert = gyroskop_zout / 131 return",
"break else: while KeyboardInterrupt is not True: try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run()",
"import numpy as np from classes import Debug, KalmanFilter import smbus bus =",
"bus.read_byte_data(address, reg + 1) # h = bus.read_byte_data(self.address, reg) # l = bus.read_byte_data(self.address,",
"auf +-2g) bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000) # setzt den Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100) #",
"- self.offset) ''' def test_imu(save=False, draw=False): print(\"stat testing...\") imu = Imu(sim_mode=False) t_ref =",
"val) + 1) else: return val def read_raw(self): if self.sim_mode == True: return",
"/ 50.0 # state transition model, A F = np.array([[1, dt, 0], [0,",
"KeyboardInterrupt: break if save: imu.debug.save() if draw: imu.debug.draw() return # if __name__== \"__main\":",
"= sim_mode self.kf = self.filter_config() self.raw = self.read_raw() self.offset = self.offset_calc() #self.port =",
"is not True: try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) except KeyboardInterrupt: break",
"= self.filter_config() self.raw = self.read_raw() self.offset = self.offset_calc() #self.port = port self.imu_config() def",
"dt], [0, 0, 1]]) H = np.array([0, 0, 1]).reshape(1, 3) # transponieren #observation",
"8) + l return value def read_word_2c(self, reg): val = self.read_word(reg) if (val",
"= beschleunigung_xout / 16384.0 # value from sensor documentation beschleunigung_yout_skaliert = beschleunigung_yout /",
"self.raw = self.read_raw() self.offset = self.offset_calc() #self.port = port self.imu_config() def filter_config(self): #",
"meine C matrix für den Ausgang, also müsste das mittlere die geschwindigkeit sein",
"= bus.read_byte_data(address, reg + 1) # h = bus.read_byte_data(self.address, reg) # l =",
"self.kf.x[2]) return self.kf.x[1] def process(self): return self.kalman_filter(self.read_raw() - self.offset) ''' def test_imu(save=False, draw=False):",
"for i in range(0, 1000): try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) time.sleep(0.1)",
"= self.read_word_2c(0x47) beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0 # value from sensor documentation beschleunigung_yout_skaliert",
"200, 20 else: beschleunigung_xout = self.read_word_2c(0x3b) beschleunigung_yout = self.read_word_2c(0x3d) gyroskop_zout = self.read_word_2c(0x47) beschleunigung_xout_skaliert",
"# h = bus.read_byte_data(self.address, reg) # l = bus.read_byte_data(self.address, reg + 1) value",
"Imu(Debug, KalmanFilter): def __init__(self, sim_mode=False): self.debug = Debug('imu') self.sim_mode = sim_mode self.kf =",
"self.read_word_2c(0x3d) gyroskop_zout = self.read_word_2c(0x47) beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0 # value from sensor",
"ready..\") def read_word(self, reg): h = bus.read_byte_data(address, reg) l = bus.read_byte_data(address, reg +",
"numpy as np from classes import Debug, KalmanFilter import smbus bus = smbus.SMBus(2)",
"Aktivieren, um das Modul ansprechen zu koennen bus.write_byte_data(address, power_mgmt_1, 0) # full power",
"def read_raw(self): if self.sim_mode == True: return 100, 200, 20 else: beschleunigung_xout =",
"reg): h = bus.read_byte_data(address, reg) l = bus.read_byte_data(address, reg + 1) # h",
"= beschleunigung_yout / 16384.0 gyroskop_zout_skaliert = gyroskop_zout / 131 return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert",
"Ausgang, also müsste das mittlere die geschwindigkeit sein np.dot(self.kf.H, self.kf.predict()) self.kf.update(z) #print(\"kalmanfilter: \",",
"C q = 0.05 Q = np.array([[q, q, 0], [q, q, 0], [0,",
"np.median(offset, axis=0) def kalman_filter(self, z): # das ist meine C matrix für den",
"_z # setzt Accelerometer Full Scale Select (hier auf +-2g) bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000)",
"1) # h = bus.read_byte_data(self.address, reg) # l = bus.read_byte_data(self.address, reg + 1)",
"Reg 28 ACCEL_CONFIG2 = 0x1D # Reg 29 class Imu(Debug, KalmanFilter): def __init__(self,",
"value def read_word_2c(self, reg): val = self.read_word(reg) if (val >= 0x8000): return -((65535",
"''' def test_imu(save=False, draw=False): print(\"stat testing...\") imu = Imu(sim_mode=False) t_ref = int(round(time.time() *",
"+-2g) bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000) # setzt den Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100) # entspricht",
"state transition model, A F = np.array([[1, dt, 0], [0, 1, dt], [0,",
"from sensor documentation beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0 gyroskop_zout_skaliert = gyroskop_zout / 131",
"Imu(sim_mode=False) t_ref = int(round(time.time() * 1000)) if imu.sim_mode: for i in range(0, 1000):",
"init_data.append(self.read_raw()) offset = np.array(init_data) print(\"finished calc..\") #print(\"offset:\",offset) return np.median(offset, axis=0) def kalman_filter(self, z):",
"sim_mode=False): self.debug = Debug('imu') self.sim_mode = sim_mode self.kf = self.filter_config() self.raw = self.read_raw()",
"def read_word_2c(self, reg): val = self.read_word(reg) if (val >= 0x8000): return -((65535 -",
"imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) time.sleep(0.1) except KeyboardInterrupt: break else: while KeyboardInterrupt",
"200): init_data.append(self.read_raw()) offset = np.array(init_data) print(\"finished calc..\") #print(\"offset:\",offset) return np.median(offset, axis=0) def kalman_filter(self,",
"range(0, 1000): try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) time.sleep(0.1) except KeyboardInterrupt: break",
"kalman filter dt = 1.0 / 50.0 # state transition model, A F",
"try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) except KeyboardInterrupt: break if save: imu.debug.save()",
"dt = 1.0 / 50.0 # state transition model, A F = np.array([[1,",
"bus.read_byte_data(self.address, reg + 1) value = (h << 8) + l return value",
"beschleunigung_yout = self.read_word_2c(0x3d) gyroskop_zout = self.read_word_2c(0x47) beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0 # value",
"\", self.kf.x[0], self.kf.x[1], self.kf.x[2]) return self.kf.x[1] def process(self): return self.kalman_filter(self.read_raw() - self.offset) '''",
"ms ~50Hz #print(\"IMU config ready..\") def read_word(self, reg): h = bus.read_byte_data(address, reg) l",
"def process(self): return self.kalman_filter(self.read_raw() - self.offset) ''' def test_imu(save=False, draw=False): print(\"stat testing...\") imu",
"beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0 # value from sensor documentation beschleunigung_yout_skaliert = beschleunigung_yout",
"1000)) if imu.sim_mode: for i in range(0, 1000): try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z =",
"def imu_config(self): # Aktivieren, um das Modul ansprechen zu koennen bus.write_byte_data(address, power_mgmt_1, 0)",
"+ 1) # h = bus.read_byte_data(self.address, reg) # l = bus.read_byte_data(self.address, reg +",
"1000): try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) time.sleep(0.1) except KeyboardInterrupt: break else:",
"self.kf.x[0], self.kf.x[1], self.kf.x[2]) return self.kf.x[1] def process(self): return self.kalman_filter(self.read_raw() - self.offset) ''' def",
"dt, 0], [0, 1, dt], [0, 0, 1]]) H = np.array([0, 0, 1]).reshape(1,",
"= gyroskop_zout / 131 return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert def offset_calc(self): init_data = []",
"0], [0, 0, 0]]) # process noise R = np.array([0.8]).reshape(1, 1) # observation",
"4, also 19,8 ms ~50Hz #print(\"IMU config ready..\") def read_word(self, reg): h =",
"def filter_config(self): # paramter for kalman filter dt = 1.0 / 50.0 #",
"bus.write_byte_data(address, power_mgmt_1, 0) # full power mode # bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disabele=1, disabled",
"KeyboardInterrupt: break else: while KeyboardInterrupt is not True: try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z =",
"h = bus.read_byte_data(address, reg) l = bus.read_byte_data(address, reg + 1) # h =",
"dem Wert 4, also 19,8 ms ~50Hz #print(\"IMU config ready..\") def read_word(self, reg):",
"= bus.read_byte_data(self.address, reg + 1) value = (h << 8) + l return",
"return np.median(offset, axis=0) def kalman_filter(self, z): # das ist meine C matrix für",
"np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]]) H = np.array([0, 0,",
"imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) time.sleep(0.1) except KeyboardInterrupt: break else: while KeyboardInterrupt is",
"beschleunigung_yout_skaliert, gyroskop_zout_skaliert def offset_calc(self): init_data = [] print(\"offset calc start...\") for count in",
"val def read_raw(self): if self.sim_mode == True: return 100, 200, 20 else: beschleunigung_xout",
"0], [0, 1, dt], [0, 0, 1]]) H = np.array([0, 0, 1]).reshape(1, 3)",
"def __init__(self, sim_mode=False): self.debug = Debug('imu') self.sim_mode = sim_mode self.kf = self.filter_config() self.raw",
"0, 0]]) # process noise R = np.array([0.8]).reshape(1, 1) # observation noise return",
"np.array([0.8]).reshape(1, 1) # observation noise return KalmanFilter(F=F, H=H, Q=Q, R=R) def imu_config(self): #",
"except KeyboardInterrupt: break if save: imu.debug.save() if draw: imu.debug.draw() return # if __name__==",
"bis _z # setzt Accelerometer Full Scale Select (hier auf +-2g) bus.write_byte_data(address, ACCEL_CONFIG,",
"= imu.run() imu.debug.excecute(t_ref) time.sleep(0.1) except KeyboardInterrupt: break else: while KeyboardInterrupt is not True:",
"100, 200, 20 else: beschleunigung_xout = self.read_word_2c(0x3b) beschleunigung_yout = self.read_word_2c(0x3d) gyroskop_zout = self.read_word_2c(0x47)",
"np.array([[q, q, 0], [q, q, 0], [0, 0, 0]]) # process noise R",
"-*- coding: utf-8 -*- import time import numpy as np from classes import",
"def read_word(self, reg): h = bus.read_byte_data(address, reg) l = bus.read_byte_data(address, reg + 1)",
"else: return val def read_raw(self): if self.sim_mode == True: return 100, 200, 20",
"if save: imu.debug.save() if draw: imu.debug.draw() return # if __name__== \"__main\": test_imu(save=True) '''",
"imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) except KeyboardInterrupt: break if save: imu.debug.save() if draw:",
"# bus = smbus.SMBus(0) fuer Revision 1 address = 0x68 # via i2cdetect",
"(val >= 0x8000): return -((65535 - val) + 1) else: return val def",
"3) # transponieren #observation model C q = 0.05 Q = np.array([[q, q,",
"bus = smbus.SMBus(0) fuer Revision 1 address = 0x68 # via i2cdetect power_mgmt_1",
"time import numpy as np from classes import Debug, KalmanFilter import smbus bus",
"self.kf = self.filter_config() self.raw = self.read_raw() self.offset = self.offset_calc() #self.port = port self.imu_config()",
"ACCEL_CONFIG = 0x1C # Reg 28 ACCEL_CONFIG2 = 0x1D # Reg 29 class",
"documentation beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0 gyroskop_zout_skaliert = gyroskop_zout / 131 return beschleunigung_xout_skaliert,",
"offset_calc(self): init_data = [] print(\"offset calc start...\") for count in range(0, 200): init_data.append(self.read_raw())",
"fuer Revision 1 address = 0x68 # via i2cdetect power_mgmt_1 = 0x6b ACCEL_CONFIG",
"0, 1]]) H = np.array([0, 0, 1]).reshape(1, 3) # transponieren #observation model C",
"kalman_filter(self, z): # das ist meine C matrix für den Ausgang, also müsste",
"process(self): return self.kalman_filter(self.read_raw() - self.offset) ''' def test_imu(save=False, draw=False): print(\"stat testing...\") imu =",
"gyroskop_zout / 131 return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert def offset_calc(self): init_data = [] print(\"offset",
"[] print(\"offset calc start...\") for count in range(0, 200): init_data.append(self.read_raw()) offset = np.array(init_data)",
"t_ref = int(round(time.time() * 1000)) if imu.sim_mode: for i in range(0, 1000): try:",
"val = self.read_word(reg) if (val >= 0x8000): return -((65535 - val) + 1)",
"16384.0 # value from sensor documentation beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0 gyroskop_zout_skaliert =",
"#print(\"kalmanfilter: \", self.kf.x[0], self.kf.x[1], self.kf.x[2]) return self.kf.x[1] def process(self): return self.kalman_filter(self.read_raw() - self.offset)",
"den Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100) # entspricht dem Wert 4, also 19,8 ms",
"self.debug = Debug('imu') self.sim_mode = sim_mode self.kf = self.filter_config() self.raw = self.read_raw() self.offset",
"True: try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) except KeyboardInterrupt: break if save:",
"h = bus.read_byte_data(self.address, reg) # l = bus.read_byte_data(self.address, reg + 1) value =",
"# setzt Accelerometer Full Scale Select (hier auf +-2g) bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000) #",
"= 0x1D # Reg 29 class Imu(Debug, KalmanFilter): def __init__(self, sim_mode=False): self.debug =",
"value = (h << 8) + l return value def read_word_2c(self, reg): val",
"else: while KeyboardInterrupt is not True: try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref)",
"noise R = np.array([0.8]).reshape(1, 1) # observation noise return KalmanFilter(F=F, H=H, Q=Q, R=R)",
"ansprechen zu koennen bus.write_byte_data(address, power_mgmt_1, 0) # full power mode # bus.write_byte_data(address, power_mgmt_2,",
"reg): val = self.read_word(reg) if (val >= 0x8000): return -((65535 - val) +",
"0x8000): return -((65535 - val) + 1) else: return val def read_raw(self): if",
"axis=0) def kalman_filter(self, z): # das ist meine C matrix für den Ausgang,",
"(h << 8) + l return value def read_word_2c(self, reg): val = self.read_word(reg)",
"0x1C # Reg 28 ACCEL_CONFIG2 = 0x1D # Reg 29 class Imu(Debug, KalmanFilter):",
"filter_config(self): # paramter for kalman filter dt = 1.0 / 50.0 # state",
"= bus.read_byte_data(self.address, reg) # l = bus.read_byte_data(self.address, reg + 1) value = (h",
"power_mgmt_2, 0b00001111) #disabele=1, disabled accel_z, gyro_x bis _z # setzt Accelerometer Full Scale",
"return 100, 200, 20 else: beschleunigung_xout = self.read_word_2c(0x3b) beschleunigung_yout = self.read_word_2c(0x3d) gyroskop_zout =",
"classes import Debug, KalmanFilter import smbus bus = smbus.SMBus(2) # bus = smbus.SMBus(0)",
"imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) except KeyboardInterrupt: break if save: imu.debug.save() if",
"Debug, KalmanFilter import smbus bus = smbus.SMBus(2) # bus = smbus.SMBus(0) fuer Revision",
"q, 0], [0, 0, 0]]) # process noise R = np.array([0.8]).reshape(1, 1) #",
"0b00100000) # setzt den Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100) # entspricht dem Wert 4,",
"testing...\") imu = Imu(sim_mode=False) t_ref = int(round(time.time() * 1000)) if imu.sim_mode: for i",
"imu = Imu(sim_mode=False) t_ref = int(round(time.time() * 1000)) if imu.sim_mode: for i in",
"1.0 / 50.0 # state transition model, A F = np.array([[1, dt, 0],",
"config ready..\") def read_word(self, reg): h = bus.read_byte_data(address, reg) l = bus.read_byte_data(address, reg",
"= smbus.SMBus(2) # bus = smbus.SMBus(0) fuer Revision 1 address = 0x68 #",
"port self.imu_config() def filter_config(self): # paramter for kalman filter dt = 1.0 /",
"self.filter_config() self.raw = self.read_raw() self.offset = self.offset_calc() #self.port = port self.imu_config() def filter_config(self):",
"ACCEL_CONFIG2, 0b00000100) # entspricht dem Wert 4, also 19,8 ms ~50Hz #print(\"IMU config",
"self.read_word(reg) if (val >= 0x8000): return -((65535 - val) + 1) else: return",
"beschleunigung_yout / 16384.0 gyroskop_zout_skaliert = gyroskop_zout / 131 return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert def",
"imu.debug.excecute(t_ref) time.sleep(0.1) except KeyboardInterrupt: break else: while KeyboardInterrupt is not True: try: imu.debug.V_X,",
"bus = smbus.SMBus(2) # bus = smbus.SMBus(0) fuer Revision 1 address = 0x68",
"0x68 # via i2cdetect power_mgmt_1 = 0x6b ACCEL_CONFIG = 0x1C # Reg 28",
"29 class Imu(Debug, KalmanFilter): def __init__(self, sim_mode=False): self.debug = Debug('imu') self.sim_mode = sim_mode",
"time.sleep(0.1) except KeyboardInterrupt: break else: while KeyboardInterrupt is not True: try: imu.debug.V_X, imu.debug.V_Y,",
"self.sim_mode == True: return 100, 200, 20 else: beschleunigung_xout = self.read_word_2c(0x3b) beschleunigung_yout =",
"= self.offset_calc() #self.port = port self.imu_config() def filter_config(self): # paramter for kalman filter",
"return self.kf.x[1] def process(self): return self.kalman_filter(self.read_raw() - self.offset) ''' def test_imu(save=False, draw=False): print(\"stat",
"F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]]) H =",
"~50Hz #print(\"IMU config ready..\") def read_word(self, reg): h = bus.read_byte_data(address, reg) l =",
"setzt Accelerometer Full Scale Select (hier auf +-2g) bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000) # setzt",
">= 0x8000): return -((65535 - val) + 1) else: return val def read_raw(self):",
"from classes import Debug, KalmanFilter import smbus bus = smbus.SMBus(2) # bus =",
"__init__(self, sim_mode=False): self.debug = Debug('imu') self.sim_mode = sim_mode self.kf = self.filter_config() self.raw =",
"Modul ansprechen zu koennen bus.write_byte_data(address, power_mgmt_1, 0) # full power mode # bus.write_byte_data(address,",
"self.read_word_2c(0x3b) beschleunigung_yout = self.read_word_2c(0x3d) gyroskop_zout = self.read_word_2c(0x47) beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0 #",
"1 address = 0x68 # via i2cdetect power_mgmt_1 = 0x6b ACCEL_CONFIG = 0x1C",
"1) # observation noise return KalmanFilter(F=F, H=H, Q=Q, R=R) def imu_config(self): # Aktivieren,",
"calc..\") #print(\"offset:\",offset) return np.median(offset, axis=0) def kalman_filter(self, z): # das ist meine C",
"Full Scale Select (hier auf +-2g) bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000) # setzt den Tiefpass-Filter",
"bus.read_byte_data(address, reg) l = bus.read_byte_data(address, reg + 1) # h = bus.read_byte_data(self.address, reg)",
"/ 131 return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert def offset_calc(self): init_data = [] print(\"offset calc",
"[q, q, 0], [0, 0, 0]]) # process noise R = np.array([0.8]).reshape(1, 1)",
"while KeyboardInterrupt is not True: try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) except",
"Reg 29 class Imu(Debug, KalmanFilter): def __init__(self, sim_mode=False): self.debug = Debug('imu') self.sim_mode =",
"process noise R = np.array([0.8]).reshape(1, 1) # observation noise return KalmanFilter(F=F, H=H, Q=Q,",
"KalmanFilter(F=F, H=H, Q=Q, R=R) def imu_config(self): # Aktivieren, um das Modul ansprechen zu",
"20 else: beschleunigung_xout = self.read_word_2c(0x3b) beschleunigung_yout = self.read_word_2c(0x3d) gyroskop_zout = self.read_word_2c(0x47) beschleunigung_xout_skaliert =",
"for count in range(0, 200): init_data.append(self.read_raw()) offset = np.array(init_data) print(\"finished calc..\") #print(\"offset:\",offset) return",
"131 return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert def offset_calc(self): init_data = [] print(\"offset calc start...\")",
"das ist meine C matrix für den Ausgang, also müsste das mittlere die",
"reg + 1) value = (h << 8) + l return value def",
"= 0x68 # via i2cdetect power_mgmt_1 = 0x6b ACCEL_CONFIG = 0x1C # Reg",
"mode # bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disabele=1, disabled accel_z, gyro_x bis _z # setzt",
"19,8 ms ~50Hz #print(\"IMU config ready..\") def read_word(self, reg): h = bus.read_byte_data(address, reg)",
"self.kf.x[1], self.kf.x[2]) return self.kf.x[1] def process(self): return self.kalman_filter(self.read_raw() - self.offset) ''' def test_imu(save=False,",
"smbus.SMBus(0) fuer Revision 1 address = 0x68 # via i2cdetect power_mgmt_1 = 0x6b",
"gyroskop_zout = self.read_word_2c(0x47) beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0 # value from sensor documentation",
"das Modul ansprechen zu koennen bus.write_byte_data(address, power_mgmt_1, 0) # full power mode #",
"* 1000)) if imu.sim_mode: for i in range(0, 1000): try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z",
"power_mgmt_1, 0) # full power mode # bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disabele=1, disabled accel_z,",
"observation noise return KalmanFilter(F=F, H=H, Q=Q, R=R) def imu_config(self): # Aktivieren, um das",
"0]]) # process noise R = np.array([0.8]).reshape(1, 1) # observation noise return KalmanFilter(F=F,",
"l = bus.read_byte_data(address, reg + 1) # h = bus.read_byte_data(self.address, reg) # l",
"Scale Select (hier auf +-2g) bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000) # setzt den Tiefpass-Filter bus.write_byte_data(address,",
"imu.sim_mode: for i in range(0, 1000): try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref)",
"# state transition model, A F = np.array([[1, dt, 0], [0, 1, dt],",
"KalmanFilter): def __init__(self, sim_mode=False): self.debug = Debug('imu') self.sim_mode = sim_mode self.kf = self.filter_config()",
"ACCEL_CONFIG2 = 0x1D # Reg 29 class Imu(Debug, KalmanFilter): def __init__(self, sim_mode=False): self.debug",
"0) # full power mode # bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disabele=1, disabled accel_z, gyro_x",
"q = 0.05 Q = np.array([[q, q, 0], [q, q, 0], [0, 0,",
"bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000) # setzt den Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100) # entspricht dem",
"reg) # l = bus.read_byte_data(self.address, reg + 1) value = (h << 8)",
"self.read_raw() self.offset = self.offset_calc() #self.port = port self.imu_config() def filter_config(self): # paramter for",
"geschwindigkeit sein np.dot(self.kf.H, self.kf.predict()) self.kf.update(z) #print(\"kalmanfilter: \", self.kf.x[0], self.kf.x[1], self.kf.x[2]) return self.kf.x[1] def",
"setzt den Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100) # entspricht dem Wert 4, also 19,8",
"[0, 0, 1]]) H = np.array([0, 0, 1]).reshape(1, 3) # transponieren #observation model",
"self.offset) ''' def test_imu(save=False, draw=False): print(\"stat testing...\") imu = Imu(sim_mode=False) t_ref = int(round(time.time()",
"bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disabele=1, disabled accel_z, gyro_x bis _z # setzt Accelerometer Full",
"reg) l = bus.read_byte_data(address, reg + 1) # h = bus.read_byte_data(self.address, reg) #",
"(hier auf +-2g) bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000) # setzt den Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100)",
"for kalman filter dt = 1.0 / 50.0 # state transition model, A",
"break if save: imu.debug.save() if draw: imu.debug.draw() return # if __name__== \"__main\": test_imu(save=True)",
"imu.debug.excecute(t_ref) except KeyboardInterrupt: break if save: imu.debug.save() if draw: imu.debug.draw() return # if",
"transition model, A F = np.array([[1, dt, 0], [0, 1, dt], [0, 0,",
"# observation noise return KalmanFilter(F=F, H=H, Q=Q, R=R) def imu_config(self): # Aktivieren, um",
"/ 16384.0 # value from sensor documentation beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0 gyroskop_zout_skaliert",
"16384.0 gyroskop_zout_skaliert = gyroskop_zout / 131 return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert def offset_calc(self): init_data",
"beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert def offset_calc(self): init_data = [] print(\"offset calc start...\") for count",
"R = np.array([0.8]).reshape(1, 1) # observation noise return KalmanFilter(F=F, H=H, Q=Q, R=R) def",
"#disabele=1, disabled accel_z, gyro_x bis _z # setzt Accelerometer Full Scale Select (hier",
"filter dt = 1.0 / 50.0 # state transition model, A F =",
"imu_config(self): # Aktivieren, um das Modul ansprechen zu koennen bus.write_byte_data(address, power_mgmt_1, 0) #",
"koennen bus.write_byte_data(address, power_mgmt_1, 0) # full power mode # bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disabele=1,",
"noise return KalmanFilter(F=F, H=H, Q=Q, R=R) def imu_config(self): # Aktivieren, um das Modul",
"def kalman_filter(self, z): # das ist meine C matrix für den Ausgang, also",
"# setzt den Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100) # entspricht dem Wert 4, also",
"# transponieren #observation model C q = 0.05 Q = np.array([[q, q, 0],",
"draw=False): print(\"stat testing...\") imu = Imu(sim_mode=False) t_ref = int(round(time.time() * 1000)) if imu.sim_mode:",
"# value from sensor documentation beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0 gyroskop_zout_skaliert = gyroskop_zout",
"np.array(init_data) print(\"finished calc..\") #print(\"offset:\",offset) return np.median(offset, axis=0) def kalman_filter(self, z): # das ist",
"as np from classes import Debug, KalmanFilter import smbus bus = smbus.SMBus(2) #",
"return KalmanFilter(F=F, H=H, Q=Q, R=R) def imu_config(self): # Aktivieren, um das Modul ansprechen",
"i2cdetect power_mgmt_1 = 0x6b ACCEL_CONFIG = 0x1C # Reg 28 ACCEL_CONFIG2 = 0x1D",
"True: return 100, 200, 20 else: beschleunigung_xout = self.read_word_2c(0x3b) beschleunigung_yout = self.read_word_2c(0x3d) gyroskop_zout",
"test_imu(save=False, draw=False): print(\"stat testing...\") imu = Imu(sim_mode=False) t_ref = int(round(time.time() * 1000)) if",
"1) else: return val def read_raw(self): if self.sim_mode == True: return 100, 200,",
"i in range(0, 1000): try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) time.sleep(0.1) except",
"self.sim_mode = sim_mode self.kf = self.filter_config() self.raw = self.read_raw() self.offset = self.offset_calc() #self.port",
"np.dot(self.kf.H, self.kf.predict()) self.kf.update(z) #print(\"kalmanfilter: \", self.kf.x[0], self.kf.x[1], self.kf.x[2]) return self.kf.x[1] def process(self): return",
"l return value def read_word_2c(self, reg): val = self.read_word(reg) if (val >= 0x8000):",
"# paramter for kalman filter dt = 1.0 / 50.0 # state transition",
"= port self.imu_config() def filter_config(self): # paramter for kalman filter dt = 1.0",
"matrix für den Ausgang, also müsste das mittlere die geschwindigkeit sein np.dot(self.kf.H, self.kf.predict())",
"import smbus bus = smbus.SMBus(2) # bus = smbus.SMBus(0) fuer Revision 1 address",
"utf-8 -*- import time import numpy as np from classes import Debug, KalmanFilter",
"KalmanFilter import smbus bus = smbus.SMBus(2) # bus = smbus.SMBus(0) fuer Revision 1",
"= smbus.SMBus(0) fuer Revision 1 address = 0x68 # via i2cdetect power_mgmt_1 =",
"z): # das ist meine C matrix für den Ausgang, also müsste das",
"1) value = (h << 8) + l return value def read_word_2c(self, reg):",
"mittlere die geschwindigkeit sein np.dot(self.kf.H, self.kf.predict()) self.kf.update(z) #print(\"kalmanfilter: \", self.kf.x[0], self.kf.x[1], self.kf.x[2]) return",
"= np.array([[q, q, 0], [q, q, 0], [0, 0, 0]]) # process noise",
"[0, 0, 0]]) # process noise R = np.array([0.8]).reshape(1, 1) # observation noise",
"= int(round(time.time() * 1000)) if imu.sim_mode: for i in range(0, 1000): try: imu.debug.V_X,",
"accel_z, gyro_x bis _z # setzt Accelerometer Full Scale Select (hier auf +-2g)",
"= 1.0 / 50.0 # state transition model, A F = np.array([[1, dt,",
"start...\") for count in range(0, 200): init_data.append(self.read_raw()) offset = np.array(init_data) print(\"finished calc..\") #print(\"offset:\",offset)",
"# Reg 28 ACCEL_CONFIG2 = 0x1D # Reg 29 class Imu(Debug, KalmanFilter): def",
"H = np.array([0, 0, 1]).reshape(1, 3) # transponieren #observation model C q =",
"print(\"stat testing...\") imu = Imu(sim_mode=False) t_ref = int(round(time.time() * 1000)) if imu.sim_mode: for",
"0x1D # Reg 29 class Imu(Debug, KalmanFilter): def __init__(self, sim_mode=False): self.debug = Debug('imu')",
"return self.kalman_filter(self.read_raw() - self.offset) ''' def test_imu(save=False, draw=False): print(\"stat testing...\") imu = Imu(sim_mode=False)",
"ist meine C matrix für den Ausgang, also müsste das mittlere die geschwindigkeit",
"self.offset_calc() #self.port = port self.imu_config() def filter_config(self): # paramter for kalman filter dt",
"coding: utf-8 -*- import time import numpy as np from classes import Debug,",
"-((65535 - val) + 1) else: return val def read_raw(self): if self.sim_mode ==",
"bus.read_byte_data(self.address, reg) # l = bus.read_byte_data(self.address, reg + 1) value = (h <<",
"also 19,8 ms ~50Hz #print(\"IMU config ready..\") def read_word(self, reg): h = bus.read_byte_data(address,",
"try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) time.sleep(0.1) except KeyboardInterrupt: break else: while",
"except KeyboardInterrupt: break else: while KeyboardInterrupt is not True: try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z",
"0b00000100) # entspricht dem Wert 4, also 19,8 ms ~50Hz #print(\"IMU config ready..\")",
"value from sensor documentation beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0 gyroskop_zout_skaliert = gyroskop_zout /",
"# -*- coding: utf-8 -*- import time import numpy as np from classes",
"sein np.dot(self.kf.H, self.kf.predict()) self.kf.update(z) #print(\"kalmanfilter: \", self.kf.x[0], self.kf.x[1], self.kf.x[2]) return self.kf.x[1] def process(self):",
"self.kf.x[1] def process(self): return self.kalman_filter(self.read_raw() - self.offset) ''' def test_imu(save=False, draw=False): print(\"stat testing...\")",
"= 0x1C # Reg 28 ACCEL_CONFIG2 = 0x1D # Reg 29 class Imu(Debug,",
"für den Ausgang, also müsste das mittlere die geschwindigkeit sein np.dot(self.kf.H, self.kf.predict()) self.kf.update(z)",
"H=H, Q=Q, R=R) def imu_config(self): # Aktivieren, um das Modul ansprechen zu koennen",
"import time import numpy as np from classes import Debug, KalmanFilter import smbus",
"offset = np.array(init_data) print(\"finished calc..\") #print(\"offset:\",offset) return np.median(offset, axis=0) def kalman_filter(self, z): #",
"= Imu(sim_mode=False) t_ref = int(round(time.time() * 1000)) if imu.sim_mode: for i in range(0,",
"# full power mode # bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disabele=1, disabled accel_z, gyro_x bis",
"imu.run() imu.debug.excecute(t_ref) time.sleep(0.1) except KeyboardInterrupt: break else: while KeyboardInterrupt is not True: try:",
"0], [q, q, 0], [0, 0, 0]]) # process noise R = np.array([0.8]).reshape(1,",
"read_word(self, reg): h = bus.read_byte_data(address, reg) l = bus.read_byte_data(address, reg + 1) #",
"smbus.SMBus(2) # bus = smbus.SMBus(0) fuer Revision 1 address = 0x68 # via",
"return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert def offset_calc(self): init_data = [] print(\"offset calc start...\") for",
"def test_imu(save=False, draw=False): print(\"stat testing...\") imu = Imu(sim_mode=False) t_ref = int(round(time.time() * 1000))",
"imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) except KeyboardInterrupt: break if save: imu.debug.save() if draw: imu.debug.draw()",
"if self.sim_mode == True: return 100, 200, 20 else: beschleunigung_xout = self.read_word_2c(0x3b) beschleunigung_yout",
"0, 1]).reshape(1, 3) # transponieren #observation model C q = 0.05 Q =",
"address = 0x68 # via i2cdetect power_mgmt_1 = 0x6b ACCEL_CONFIG = 0x1C #",
"not True: try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) except KeyboardInterrupt: break if",
"== True: return 100, 200, 20 else: beschleunigung_xout = self.read_word_2c(0x3b) beschleunigung_yout = self.read_word_2c(0x3d)",
"50.0 # state transition model, A F = np.array([[1, dt, 0], [0, 1,",
"smbus bus = smbus.SMBus(2) # bus = smbus.SMBus(0) fuer Revision 1 address =",
"sim_mode self.kf = self.filter_config() self.raw = self.read_raw() self.offset = self.offset_calc() #self.port = port",
"0b00001111) #disabele=1, disabled accel_z, gyro_x bis _z # setzt Accelerometer Full Scale Select",
"/ 16384.0 gyroskop_zout_skaliert = gyroskop_zout / 131 return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert def offset_calc(self):",
"np.array([0, 0, 1]).reshape(1, 3) # transponieren #observation model C q = 0.05 Q",
"= np.array([0.8]).reshape(1, 1) # observation noise return KalmanFilter(F=F, H=H, Q=Q, R=R) def imu_config(self):",
"= Debug('imu') self.sim_mode = sim_mode self.kf = self.filter_config() self.raw = self.read_raw() self.offset =",
"Q=Q, R=R) def imu_config(self): # Aktivieren, um das Modul ansprechen zu koennen bus.write_byte_data(address,",
"return val def read_raw(self): if self.sim_mode == True: return 100, 200, 20 else:",
"calc start...\") for count in range(0, 200): init_data.append(self.read_raw()) offset = np.array(init_data) print(\"finished calc..\")",
"def offset_calc(self): init_data = [] print(\"offset calc start...\") for count in range(0, 200):",
"den Ausgang, also müsste das mittlere die geschwindigkeit sein np.dot(self.kf.H, self.kf.predict()) self.kf.update(z) #print(\"kalmanfilter:",
"+ 1) else: return val def read_raw(self): if self.sim_mode == True: return 100,",
"= self.read_word_2c(0x3b) beschleunigung_yout = self.read_word_2c(0x3d) gyroskop_zout = self.read_word_2c(0x47) beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0",
"l = bus.read_byte_data(self.address, reg + 1) value = (h << 8) + l",
"# Reg 29 class Imu(Debug, KalmanFilter): def __init__(self, sim_mode=False): self.debug = Debug('imu') self.sim_mode",
"int(round(time.time() * 1000)) if imu.sim_mode: for i in range(0, 1000): try: imu.debug.V_X, imu.debug.V_Y,",
"self.imu_config() def filter_config(self): # paramter for kalman filter dt = 1.0 / 50.0",
"Debug('imu') self.sim_mode = sim_mode self.kf = self.filter_config() self.raw = self.read_raw() self.offset = self.offset_calc()",
"# via i2cdetect power_mgmt_1 = 0x6b ACCEL_CONFIG = 0x1C # Reg 28 ACCEL_CONFIG2",
"self.offset = self.offset_calc() #self.port = port self.imu_config() def filter_config(self): # paramter for kalman",
"beschleunigung_xout / 16384.0 # value from sensor documentation beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0",
"# das ist meine C matrix für den Ausgang, also müsste das mittlere",
"full power mode # bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disabele=1, disabled accel_z, gyro_x bis _z",
"= bus.read_byte_data(address, reg) l = bus.read_byte_data(address, reg + 1) # h = bus.read_byte_data(self.address,",
"= self.read_word_2c(0x3d) gyroskop_zout = self.read_word_2c(0x47) beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0 # value from",
"die geschwindigkeit sein np.dot(self.kf.H, self.kf.predict()) self.kf.update(z) #print(\"kalmanfilter: \", self.kf.x[0], self.kf.x[1], self.kf.x[2]) return self.kf.x[1]",
"Accelerometer Full Scale Select (hier auf +-2g) bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000) # setzt den",
"imu.run() imu.debug.excecute(t_ref) except KeyboardInterrupt: break if save: imu.debug.save() if draw: imu.debug.draw() return #",
"-*- import time import numpy as np from classes import Debug, KalmanFilter import",
"bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100) # entspricht dem Wert 4, also 19,8 ms ~50Hz #print(\"IMU",
"if imu.sim_mode: for i in range(0, 1000): try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run()",
"0x6b ACCEL_CONFIG = 0x1C # Reg 28 ACCEL_CONFIG2 = 0x1D # Reg 29",
"model, A F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]])",
"also müsste das mittlere die geschwindigkeit sein np.dot(self.kf.H, self.kf.predict()) self.kf.update(z) #print(\"kalmanfilter: \", self.kf.x[0],",
"class Imu(Debug, KalmanFilter): def __init__(self, sim_mode=False): self.debug = Debug('imu') self.sim_mode = sim_mode self.kf",
"gyroskop_zout_skaliert = gyroskop_zout / 131 return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert def offset_calc(self): init_data =",
"28 ACCEL_CONFIG2 = 0x1D # Reg 29 class Imu(Debug, KalmanFilter): def __init__(self, sim_mode=False):",
"self.read_word_2c(0x47) beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0 # value from sensor documentation beschleunigung_yout_skaliert =",
"= np.array(init_data) print(\"finished calc..\") #print(\"offset:\",offset) return np.median(offset, axis=0) def kalman_filter(self, z): # das",
"1]).reshape(1, 3) # transponieren #observation model C q = 0.05 Q = np.array([[q,",
"Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100) # entspricht dem Wert 4, also 19,8 ms ~50Hz",
"#print(\"IMU config ready..\") def read_word(self, reg): h = bus.read_byte_data(address, reg) l = bus.read_byte_data(address,",
"read_word_2c(self, reg): val = self.read_word(reg) if (val >= 0x8000): return -((65535 - val)",
"beschleunigung_xout = self.read_word_2c(0x3b) beschleunigung_yout = self.read_word_2c(0x3d) gyroskop_zout = self.read_word_2c(0x47) beschleunigung_xout_skaliert = beschleunigung_xout /",
"q, 0], [q, q, 0], [0, 0, 0]]) # process noise R =",
"imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) time.sleep(0.1) except KeyboardInterrupt: break else: while KeyboardInterrupt is not",
"gyro_x bis _z # setzt Accelerometer Full Scale Select (hier auf +-2g) bus.write_byte_data(address,",
"paramter for kalman filter dt = 1.0 / 50.0 # state transition model,",
"model C q = 0.05 Q = np.array([[q, q, 0], [q, q, 0],",
"print(\"offset calc start...\") for count in range(0, 200): init_data.append(self.read_raw()) offset = np.array(init_data) print(\"finished",
"self.kalman_filter(self.read_raw() - self.offset) ''' def test_imu(save=False, draw=False): print(\"stat testing...\") imu = Imu(sim_mode=False) t_ref",
"Revision 1 address = 0x68 # via i2cdetect power_mgmt_1 = 0x6b ACCEL_CONFIG =",
"# entspricht dem Wert 4, also 19,8 ms ~50Hz #print(\"IMU config ready..\") def",
"zu koennen bus.write_byte_data(address, power_mgmt_1, 0) # full power mode # bus.write_byte_data(address, power_mgmt_2, 0b00001111)",
"C matrix für den Ausgang, also müsste das mittlere die geschwindigkeit sein np.dot(self.kf.H,",
"# process noise R = np.array([0.8]).reshape(1, 1) # observation noise return KalmanFilter(F=F, H=H,",
"1]]) H = np.array([0, 0, 1]).reshape(1, 3) # transponieren #observation model C q",
"0.05 Q = np.array([[q, q, 0], [q, q, 0], [0, 0, 0]]) #",
"1, dt], [0, 0, 1]]) H = np.array([0, 0, 1]).reshape(1, 3) # transponieren",
"R=R) def imu_config(self): # Aktivieren, um das Modul ansprechen zu koennen bus.write_byte_data(address, power_mgmt_1,",
"+ l return value def read_word_2c(self, reg): val = self.read_word(reg) if (val >=",
"= (h << 8) + l return value def read_word_2c(self, reg): val =",
"= [] print(\"offset calc start...\") for count in range(0, 200): init_data.append(self.read_raw()) offset =",
"um das Modul ansprechen zu koennen bus.write_byte_data(address, power_mgmt_1, 0) # full power mode",
"#print(\"offset:\",offset) return np.median(offset, axis=0) def kalman_filter(self, z): # das ist meine C matrix",
"# l = bus.read_byte_data(self.address, reg + 1) value = (h << 8) +",
"read_raw(self): if self.sim_mode == True: return 100, 200, 20 else: beschleunigung_xout = self.read_word_2c(0x3b)",
"return value def read_word_2c(self, reg): val = self.read_word(reg) if (val >= 0x8000): return",
"print(\"finished calc..\") #print(\"offset:\",offset) return np.median(offset, axis=0) def kalman_filter(self, z): # das ist meine",
"self.kf.update(z) #print(\"kalmanfilter: \", self.kf.x[0], self.kf.x[1], self.kf.x[2]) return self.kf.x[1] def process(self): return self.kalman_filter(self.read_raw() -",
"reg + 1) # h = bus.read_byte_data(self.address, reg) # l = bus.read_byte_data(self.address, reg",
"<< 8) + l return value def read_word_2c(self, reg): val = self.read_word(reg) if",
"= np.array([0, 0, 1]).reshape(1, 3) # transponieren #observation model C q = 0.05",
"self.kf.predict()) self.kf.update(z) #print(\"kalmanfilter: \", self.kf.x[0], self.kf.x[1], self.kf.x[2]) return self.kf.x[1] def process(self): return self.kalman_filter(self.read_raw()",
"np from classes import Debug, KalmanFilter import smbus bus = smbus.SMBus(2) # bus",
"Select (hier auf +-2g) bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000) # setzt den Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2,",
"= 0.05 Q = np.array([[q, q, 0], [q, q, 0], [0, 0, 0]])",
"[0, 1, dt], [0, 0, 1]]) H = np.array([0, 0, 1]).reshape(1, 3) #",
"= 0x6b ACCEL_CONFIG = 0x1C # Reg 28 ACCEL_CONFIG2 = 0x1D # Reg",
"transponieren #observation model C q = 0.05 Q = np.array([[q, q, 0], [q,",
"Wert 4, also 19,8 ms ~50Hz #print(\"IMU config ready..\") def read_word(self, reg): h",
"entspricht dem Wert 4, also 19,8 ms ~50Hz #print(\"IMU config ready..\") def read_word(self,",
"else: beschleunigung_xout = self.read_word_2c(0x3b) beschleunigung_yout = self.read_word_2c(0x3d) gyroskop_zout = self.read_word_2c(0x47) beschleunigung_xout_skaliert = beschleunigung_xout",
"import Debug, KalmanFilter import smbus bus = smbus.SMBus(2) # bus = smbus.SMBus(0) fuer",
"= np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]]) H = np.array([0,",
"#self.port = port self.imu_config() def filter_config(self): # paramter for kalman filter dt =",
"power_mgmt_1 = 0x6b ACCEL_CONFIG = 0x1C # Reg 28 ACCEL_CONFIG2 = 0x1D #",
"= self.read_raw() self.offset = self.offset_calc() #self.port = port self.imu_config() def filter_config(self): # paramter",
"#observation model C q = 0.05 Q = np.array([[q, q, 0], [q, q,",
"disabled accel_z, gyro_x bis _z # setzt Accelerometer Full Scale Select (hier auf",
"in range(0, 1000): try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) time.sleep(0.1) except KeyboardInterrupt:",
"A F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]]) H",
"KeyboardInterrupt is not True: try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) except KeyboardInterrupt:",
"das mittlere die geschwindigkeit sein np.dot(self.kf.H, self.kf.predict()) self.kf.update(z) #print(\"kalmanfilter: \", self.kf.x[0], self.kf.x[1], self.kf.x[2])"
] |
[] |
[
"class Worker(object): \"\"\"A Worker node runs Tasks that are provided by the Master",
"getLogger(\"{} {} {}\".format(__name__, name, rank)) def send(self, message): \"\"\"Send the given message to",
"Task {}\".format(self.task._uid)) if self.task.skip is False: # This should always be False self.task.run()",
"def __init__(self, mpi, master=MASTER): \"\"\"Construct a Worker with the global MPI object. :param",
"master=MASTER): \"\"\"Construct a Worker with the global MPI object. :param mpi: the global",
":type message: object \"\"\" self.comm.send(message, dest=self.master, tag=self.tag) def receive(self): \"\"\"Receive and act upon",
"int, optional \"\"\" self.mpi = mpi self.comm = mpi.COMM_WORLD self.status = mpi.Status() self.tag",
"act upon a message/Task from the Master node.\"\"\" task = self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status)",
"Master node.\"\"\" def __init__(self, mpi, master=MASTER): \"\"\"Construct a Worker with the global MPI",
"tags.WORK: self.task = task def run(self): \"\"\"Run the current Task unless exiting. Sends",
"rank)) def send(self, message): \"\"\"Send the given message to the Master node. :param",
"self.tag == tags.WORK: self.task = task def run(self): \"\"\"Run the current Task unless",
"if self.tag == tags.EXIT: return if __debug__: self.log.debug(\"Start Task {}\".format(self.task._uid)) if self.task.skip is",
"the Master node. :param message: the message to send :type message: object \"\"\"",
"send :type message: object \"\"\" self.comm.send(message, dest=self.master, tag=self.tag) def receive(self): \"\"\"Receive and act",
"be False self.task.run() if __debug__: self.log.debug(\"Finished Task {}\".format(self.task)) self.send((self.task._pid, self.task._uid)) def loop(self): \"\"\"Loop",
"current Task unless exiting. Sends the PID and UID to the Master after",
"\"\"\"Represents a Worker node.\"\"\" from ..util import tags, MASTER class Worker(object): \"\"\"A Worker",
"id of the Master node, defaults to MASTER (0) :type master: int, optional",
"mpi, master=MASTER): \"\"\"Construct a Worker with the global MPI object. :param mpi: the",
"utf-8 -*- \"\"\"Represents a Worker node.\"\"\" from ..util import tags, MASTER class Worker(object):",
"message/Task from the Master node.\"\"\" task = self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status) self.tag = self.status.Get_tag()",
"..util import tags, MASTER class Worker(object): \"\"\"A Worker node runs Tasks that are",
"self.master = master if __debug__: name = mpi.Get_processor_name() rank = self.comm.Get_rank() from logging",
"getLogger self.log = getLogger(\"{} {} {}\".format(__name__, name, rank)) def send(self, message): \"\"\"Send the",
"# This should always be False self.task.run() if __debug__: self.log.debug(\"Finished Task {}\".format(self.task)) self.send((self.task._pid,",
"\"\"\"Run the current Task unless exiting. Sends the PID and UID to the",
"Master node, defaults to MASTER (0) :type master: int, optional \"\"\" self.mpi =",
"self.status.Get_tag() if self.tag == tags.WORK: self.task = task def run(self): \"\"\"Run the current",
"exiting. Sends the PID and UID to the Master after completion.\"\"\" if self.tag",
"= getLogger(\"{} {} {}\".format(__name__, name, rank)) def send(self, message): \"\"\"Send the given message",
"if self.tag == tags.WORK: self.task = task def run(self): \"\"\"Run the current Task",
"__init__(self, mpi, master=MASTER): \"\"\"Construct a Worker with the global MPI object. :param mpi:",
"self.log = getLogger(\"{} {} {}\".format(__name__, name, rank)) def send(self, message): \"\"\"Send the given",
"\"\"\" self.mpi = mpi self.comm = mpi.COMM_WORLD self.status = mpi.Status() self.tag = tags.WORK",
"{}\".format(self.task._uid)) if self.task.skip is False: # This should always be False self.task.run() if",
"False: # This should always be False self.task.run() if __debug__: self.log.debug(\"Finished Task {}\".format(self.task))",
"def loop(self): \"\"\"Loop between receive and run until told to exit.\"\"\" while self.tag",
"defaults to MASTER (0) :type master: int, optional \"\"\" self.mpi = mpi self.comm",
"mpi self.comm = mpi.COMM_WORLD self.status = mpi.Status() self.tag = tags.WORK self.master = master",
"a message/Task from the Master node.\"\"\" task = self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status) self.tag =",
"between receive and run until told to exit.\"\"\" while self.tag != tags.EXIT: self.receive()",
"self.log.debug(\"Finished Task {}\".format(self.task)) self.send((self.task._pid, self.task._uid)) def loop(self): \"\"\"Loop between receive and run until",
"Task {}\".format(self.task)) self.send((self.task._pid, self.task._uid)) def loop(self): \"\"\"Loop between receive and run until told",
"-*- \"\"\"Represents a Worker node.\"\"\" from ..util import tags, MASTER class Worker(object): \"\"\"A",
"a Worker node.\"\"\" from ..util import tags, MASTER class Worker(object): \"\"\"A Worker node",
"from logging import getLogger self.log = getLogger(\"{} {} {}\".format(__name__, name, rank)) def send(self,",
"MASTER class Worker(object): \"\"\"A Worker node runs Tasks that are provided by the",
"object \"\"\" self.comm.send(message, dest=self.master, tag=self.tag) def receive(self): \"\"\"Receive and act upon a message/Task",
"node.\"\"\" task = self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status) self.tag = self.status.Get_tag() if self.tag == tags.WORK:",
"tag=self.mpi.ANY_TAG, status=self.status) self.tag = self.status.Get_tag() if self.tag == tags.WORK: self.task = task def",
"if self.task.skip is False: # This should always be False self.task.run() if __debug__:",
"with the global MPI object. :param mpi: the global MPI object :type mpi:",
"self.task.run() if __debug__: self.log.debug(\"Finished Task {}\".format(self.task)) self.send((self.task._pid, self.task._uid)) def loop(self): \"\"\"Loop between receive",
"the PID and UID to the Master after completion.\"\"\" if self.tag == tags.EXIT:",
"Master node.\"\"\" task = self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status) self.tag = self.status.Get_tag() if self.tag ==",
"self.comm = mpi.COMM_WORLD self.status = mpi.Status() self.tag = tags.WORK self.master = master if",
"dest=self.master, tag=self.tag) def receive(self): \"\"\"Receive and act upon a message/Task from the Master",
"def receive(self): \"\"\"Receive and act upon a message/Task from the Master node.\"\"\" task",
"= task def run(self): \"\"\"Run the current Task unless exiting. Sends the PID",
"global MPI object. :param mpi: the global MPI object :type mpi: MPI :param",
"message): \"\"\"Send the given message to the Master node. :param message: the message",
"unless exiting. Sends the PID and UID to the Master after completion.\"\"\" if",
"self.tag = self.status.Get_tag() if self.tag == tags.WORK: self.task = task def run(self): \"\"\"Run",
"are provided by the Master node.\"\"\" def __init__(self, mpi, master=MASTER): \"\"\"Construct a Worker",
"MPI object. :param mpi: the global MPI object :type mpi: MPI :param master:",
"given message to the Master node. :param message: the message to send :type",
"Worker(object): \"\"\"A Worker node runs Tasks that are provided by the Master node.\"\"\"",
"PID and UID to the Master after completion.\"\"\" if self.tag == tags.EXIT: return",
"import tags, MASTER class Worker(object): \"\"\"A Worker node runs Tasks that are provided",
"object :type mpi: MPI :param master: the id of the Master node, defaults",
"self.comm.send(message, dest=self.master, tag=self.tag) def receive(self): \"\"\"Receive and act upon a message/Task from the",
"self.task.skip is False: # This should always be False self.task.run() if __debug__: self.log.debug(\"Finished",
"Worker with the global MPI object. :param mpi: the global MPI object :type",
"= tags.WORK self.master = master if __debug__: name = mpi.Get_processor_name() rank = self.comm.Get_rank()",
"{}\".format(__name__, name, rank)) def send(self, message): \"\"\"Send the given message to the Master",
"__debug__: name = mpi.Get_processor_name() rank = self.comm.Get_rank() from logging import getLogger self.log =",
"= self.comm.Get_rank() from logging import getLogger self.log = getLogger(\"{} {} {}\".format(__name__, name, rank))",
"message to send :type message: object \"\"\" self.comm.send(message, dest=self.master, tag=self.tag) def receive(self): \"\"\"Receive",
"UID to the Master after completion.\"\"\" if self.tag == tags.EXIT: return if __debug__:",
"the message to send :type message: object \"\"\" self.comm.send(message, dest=self.master, tag=self.tag) def receive(self):",
"self.status = mpi.Status() self.tag = tags.WORK self.master = master if __debug__: name =",
"if __debug__: self.log.debug(\"Finished Task {}\".format(self.task)) self.send((self.task._pid, self.task._uid)) def loop(self): \"\"\"Loop between receive and",
"the Master node.\"\"\" task = self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status) self.tag = self.status.Get_tag() if self.tag",
"python # -*- coding: utf-8 -*- \"\"\"Represents a Worker node.\"\"\" from ..util import",
"to the Master node. :param message: the message to send :type message: object",
":param master: the id of the Master node, defaults to MASTER (0) :type",
"\"\"\"A Worker node runs Tasks that are provided by the Master node.\"\"\" def",
"{} {}\".format(__name__, name, rank)) def send(self, message): \"\"\"Send the given message to the",
"and UID to the Master after completion.\"\"\" if self.tag == tags.EXIT: return if",
"that are provided by the Master node.\"\"\" def __init__(self, mpi, master=MASTER): \"\"\"Construct a",
"Master node. :param message: the message to send :type message: object \"\"\" self.comm.send(message,",
"MASTER (0) :type master: int, optional \"\"\" self.mpi = mpi self.comm = mpi.COMM_WORLD",
"This should always be False self.task.run() if __debug__: self.log.debug(\"Finished Task {}\".format(self.task)) self.send((self.task._pid, self.task._uid))",
"to send :type message: object \"\"\" self.comm.send(message, dest=self.master, tag=self.tag) def receive(self): \"\"\"Receive and",
"to the Master after completion.\"\"\" if self.tag == tags.EXIT: return if __debug__: self.log.debug(\"Start",
"= mpi.Get_processor_name() rank = self.comm.Get_rank() from logging import getLogger self.log = getLogger(\"{} {}",
"the Master node, defaults to MASTER (0) :type master: int, optional \"\"\" self.mpi",
"status=self.status) self.tag = self.status.Get_tag() if self.tag == tags.WORK: self.task = task def run(self):",
"Worker node.\"\"\" from ..util import tags, MASTER class Worker(object): \"\"\"A Worker node runs",
"upon a message/Task from the Master node.\"\"\" task = self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status) self.tag",
"-*- coding: utf-8 -*- \"\"\"Represents a Worker node.\"\"\" from ..util import tags, MASTER",
"def send(self, message): \"\"\"Send the given message to the Master node. :param message:",
"send(self, message): \"\"\"Send the given message to the Master node. :param message: the",
"= mpi.COMM_WORLD self.status = mpi.Status() self.tag = tags.WORK self.master = master if __debug__:",
"{}\".format(self.task)) self.send((self.task._pid, self.task._uid)) def loop(self): \"\"\"Loop between receive and run until told to",
"provided by the Master node.\"\"\" def __init__(self, mpi, master=MASTER): \"\"\"Construct a Worker with",
"__debug__: self.log.debug(\"Start Task {}\".format(self.task._uid)) if self.task.skip is False: # This should always be",
"False self.task.run() if __debug__: self.log.debug(\"Finished Task {}\".format(self.task)) self.send((self.task._pid, self.task._uid)) def loop(self): \"\"\"Loop between",
"message: the message to send :type message: object \"\"\" self.comm.send(message, dest=self.master, tag=self.tag) def",
"receive(self): \"\"\"Receive and act upon a message/Task from the Master node.\"\"\" task =",
":type mpi: MPI :param master: the id of the Master node, defaults to",
"Tasks that are provided by the Master node.\"\"\" def __init__(self, mpi, master=MASTER): \"\"\"Construct",
"if __debug__: self.log.debug(\"Start Task {}\".format(self.task._uid)) if self.task.skip is False: # This should always",
"tags.WORK self.master = master if __debug__: name = mpi.Get_processor_name() rank = self.comm.Get_rank() from",
"from ..util import tags, MASTER class Worker(object): \"\"\"A Worker node runs Tasks that",
"mpi: MPI :param master: the id of the Master node, defaults to MASTER",
"MPI object :type mpi: MPI :param master: the id of the Master node,",
"coding: utf-8 -*- \"\"\"Represents a Worker node.\"\"\" from ..util import tags, MASTER class",
"master: the id of the Master node, defaults to MASTER (0) :type master:",
"completion.\"\"\" if self.tag == tags.EXIT: return if __debug__: self.log.debug(\"Start Task {}\".format(self.task._uid)) if self.task.skip",
"= mpi.Status() self.tag = tags.WORK self.master = master if __debug__: name = mpi.Get_processor_name()",
"the Master node.\"\"\" def __init__(self, mpi, master=MASTER): \"\"\"Construct a Worker with the global",
"always be False self.task.run() if __debug__: self.log.debug(\"Finished Task {}\".format(self.task)) self.send((self.task._pid, self.task._uid)) def loop(self):",
"node, defaults to MASTER (0) :type master: int, optional \"\"\" self.mpi = mpi",
"self.send((self.task._pid, self.task._uid)) def loop(self): \"\"\"Loop between receive and run until told to exit.\"\"\"",
"self.task = task def run(self): \"\"\"Run the current Task unless exiting. Sends the",
"MPI :param master: the id of the Master node, defaults to MASTER (0)",
"task = self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status) self.tag = self.status.Get_tag() if self.tag == tags.WORK: self.task",
"master: int, optional \"\"\" self.mpi = mpi self.comm = mpi.COMM_WORLD self.status = mpi.Status()",
"message: object \"\"\" self.comm.send(message, dest=self.master, tag=self.tag) def receive(self): \"\"\"Receive and act upon a",
"the Master after completion.\"\"\" if self.tag == tags.EXIT: return if __debug__: self.log.debug(\"Start Task",
"# -*- coding: utf-8 -*- \"\"\"Represents a Worker node.\"\"\" from ..util import tags,",
"node.\"\"\" def __init__(self, mpi, master=MASTER): \"\"\"Construct a Worker with the global MPI object.",
"= mpi self.comm = mpi.COMM_WORLD self.status = mpi.Status() self.tag = tags.WORK self.master =",
"self.tag = tags.WORK self.master = master if __debug__: name = mpi.Get_processor_name() rank =",
"= self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status) self.tag = self.status.Get_tag() if self.tag == tags.WORK: self.task =",
"name, rank)) def send(self, message): \"\"\"Send the given message to the Master node.",
"== tags.EXIT: return if __debug__: self.log.debug(\"Start Task {}\".format(self.task._uid)) if self.task.skip is False: #",
"the given message to the Master node. :param message: the message to send",
"return if __debug__: self.log.debug(\"Start Task {}\".format(self.task._uid)) if self.task.skip is False: # This should",
"= self.status.Get_tag() if self.tag == tags.WORK: self.task = task def run(self): \"\"\"Run the",
"from the Master node.\"\"\" task = self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status) self.tag = self.status.Get_tag() if",
"should always be False self.task.run() if __debug__: self.log.debug(\"Finished Task {}\".format(self.task)) self.send((self.task._pid, self.task._uid)) def",
"logging import getLogger self.log = getLogger(\"{} {} {}\".format(__name__, name, rank)) def send(self, message):",
"self.task._uid)) def loop(self): \"\"\"Loop between receive and run until told to exit.\"\"\" while",
"\"\"\" self.comm.send(message, dest=self.master, tag=self.tag) def receive(self): \"\"\"Receive and act upon a message/Task from",
"optional \"\"\" self.mpi = mpi self.comm = mpi.COMM_WORLD self.status = mpi.Status() self.tag =",
"tag=self.tag) def receive(self): \"\"\"Receive and act upon a message/Task from the Master node.\"\"\"",
"(0) :type master: int, optional \"\"\" self.mpi = mpi self.comm = mpi.COMM_WORLD self.status",
"\"\"\"Construct a Worker with the global MPI object. :param mpi: the global MPI",
"the current Task unless exiting. Sends the PID and UID to the Master",
"global MPI object :type mpi: MPI :param master: the id of the Master",
"== tags.WORK: self.task = task def run(self): \"\"\"Run the current Task unless exiting.",
"a Worker with the global MPI object. :param mpi: the global MPI object",
"node. :param message: the message to send :type message: object \"\"\" self.comm.send(message, dest=self.master,",
"object. :param mpi: the global MPI object :type mpi: MPI :param master: the",
"run(self): \"\"\"Run the current Task unless exiting. Sends the PID and UID to",
"master if __debug__: name = mpi.Get_processor_name() rank = self.comm.Get_rank() from logging import getLogger",
"__debug__: self.log.debug(\"Finished Task {}\".format(self.task)) self.send((self.task._pid, self.task._uid)) def loop(self): \"\"\"Loop between receive and run",
"rank = self.comm.Get_rank() from logging import getLogger self.log = getLogger(\"{} {} {}\".format(__name__, name,",
"node runs Tasks that are provided by the Master node.\"\"\" def __init__(self, mpi,",
"self.comm.Get_rank() from logging import getLogger self.log = getLogger(\"{} {} {}\".format(__name__, name, rank)) def",
":param mpi: the global MPI object :type mpi: MPI :param master: the id",
"by the Master node.\"\"\" def __init__(self, mpi, master=MASTER): \"\"\"Construct a Worker with the",
"the global MPI object. :param mpi: the global MPI object :type mpi: MPI",
"message to the Master node. :param message: the message to send :type message:",
"Worker node runs Tasks that are provided by the Master node.\"\"\" def __init__(self,",
"tags, MASTER class Worker(object): \"\"\"A Worker node runs Tasks that are provided by",
"tags.EXIT: return if __debug__: self.log.debug(\"Start Task {}\".format(self.task._uid)) if self.task.skip is False: # This",
"if __debug__: name = mpi.Get_processor_name() rank = self.comm.Get_rank() from logging import getLogger self.log",
"mpi: the global MPI object :type mpi: MPI :param master: the id of",
"= master if __debug__: name = mpi.Get_processor_name() rank = self.comm.Get_rank() from logging import",
"mpi.Status() self.tag = tags.WORK self.master = master if __debug__: name = mpi.Get_processor_name() rank",
"the id of the Master node, defaults to MASTER (0) :type master: int,",
"name = mpi.Get_processor_name() rank = self.comm.Get_rank() from logging import getLogger self.log = getLogger(\"{}",
"\"\"\"Send the given message to the Master node. :param message: the message to",
"receive and run until told to exit.\"\"\" while self.tag != tags.EXIT: self.receive() self.run()",
"mpi.COMM_WORLD self.status = mpi.Status() self.tag = tags.WORK self.master = master if __debug__: name",
"self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status) self.tag = self.status.Get_tag() if self.tag == tags.WORK: self.task = task",
"\"\"\"Loop between receive and run until told to exit.\"\"\" while self.tag != tags.EXIT:",
"self.tag == tags.EXIT: return if __debug__: self.log.debug(\"Start Task {}\".format(self.task._uid)) if self.task.skip is False:",
"self.log.debug(\"Start Task {}\".format(self.task._uid)) if self.task.skip is False: # This should always be False",
"to MASTER (0) :type master: int, optional \"\"\" self.mpi = mpi self.comm =",
"#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"Represents a Worker node.\"\"\" from ..util",
"and act upon a message/Task from the Master node.\"\"\" task = self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG,",
"the global MPI object :type mpi: MPI :param master: the id of the",
"mpi.Get_processor_name() rank = self.comm.Get_rank() from logging import getLogger self.log = getLogger(\"{} {} {}\".format(__name__,",
"Task unless exiting. Sends the PID and UID to the Master after completion.\"\"\"",
"node.\"\"\" from ..util import tags, MASTER class Worker(object): \"\"\"A Worker node runs Tasks",
":type master: int, optional \"\"\" self.mpi = mpi self.comm = mpi.COMM_WORLD self.status =",
"import getLogger self.log = getLogger(\"{} {} {}\".format(__name__, name, rank)) def send(self, message): \"\"\"Send",
"Sends the PID and UID to the Master after completion.\"\"\" if self.tag ==",
"task def run(self): \"\"\"Run the current Task unless exiting. Sends the PID and",
"def run(self): \"\"\"Run the current Task unless exiting. Sends the PID and UID",
"\"\"\"Receive and act upon a message/Task from the Master node.\"\"\" task = self.comm.recv(source=self.master,",
"Master after completion.\"\"\" if self.tag == tags.EXIT: return if __debug__: self.log.debug(\"Start Task {}\".format(self.task._uid))",
"after completion.\"\"\" if self.tag == tags.EXIT: return if __debug__: self.log.debug(\"Start Task {}\".format(self.task._uid)) if",
"self.mpi = mpi self.comm = mpi.COMM_WORLD self.status = mpi.Status() self.tag = tags.WORK self.master",
"runs Tasks that are provided by the Master node.\"\"\" def __init__(self, mpi, master=MASTER):",
"of the Master node, defaults to MASTER (0) :type master: int, optional \"\"\"",
":param message: the message to send :type message: object \"\"\" self.comm.send(message, dest=self.master, tag=self.tag)",
"is False: # This should always be False self.task.run() if __debug__: self.log.debug(\"Finished Task",
"loop(self): \"\"\"Loop between receive and run until told to exit.\"\"\" while self.tag !="
] |
[
") return parser.parse_args() def main(): load_dotenv(find_dotenv()) from .app import make_app application = make_app()",
"help='Launch the server', **parser_kw) run_parser.add_argument( '-H', '--host', default='127.0.0.1', help='IP address to bind to'",
"'-H', '--host', default='127.0.0.1', help='IP address to bind to' ) run_parser.add_argument( '-p', '--port', type=int,",
"the server', **parser_kw) run_parser.add_argument( '-H', '--host', default='127.0.0.1', help='IP address to bind to' )",
"'--port', type=int, default=5001, help='port number to bind to' ) return parser.parse_args() def main():",
"to' ) return parser.parse_args() def main(): load_dotenv(find_dotenv()) from .app import make_app application =",
"Namespace: parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter} parser = ArgumentParser(description='Chitty auxiliary web service') subparsers =",
"import make_app application = make_app() opts = parse_args() run_simple(opts.host, opts.port, application, use_reloader=True, use_debugger=False)",
"load_dotenv from werkzeug import run_simple def parse_args() -> Namespace: parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter}",
"from .app import make_app application = make_app() opts = parse_args() run_simple(opts.host, opts.port, application,",
"find_dotenv, load_dotenv from werkzeug import run_simple def parse_args() -> Namespace: parser_kw = {'formatter_class':",
"parse_args() -> Namespace: parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter} parser = ArgumentParser(description='Chitty auxiliary web service')",
"= parser.add_subparsers(help='Available commands') run_parser = subparsers.add_parser('run', help='Launch the server', **parser_kw) run_parser.add_argument( '-H', '--host',",
"Namespace from dotenv import find_dotenv, load_dotenv from werkzeug import run_simple def parse_args() ->",
") run_parser.add_argument( '-p', '--port', type=int, default=5001, help='port number to bind to' ) return",
"parser.add_subparsers(help='Available commands') run_parser = subparsers.add_parser('run', help='Launch the server', **parser_kw) run_parser.add_argument( '-H', '--host', default='127.0.0.1',",
"auxiliary web service') subparsers = parser.add_subparsers(help='Available commands') run_parser = subparsers.add_parser('run', help='Launch the server',",
"bind to' ) return parser.parse_args() def main(): load_dotenv(find_dotenv()) from .app import make_app application",
"default=5001, help='port number to bind to' ) return parser.parse_args() def main(): load_dotenv(find_dotenv()) from",
"-> Namespace: parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter} parser = ArgumentParser(description='Chitty auxiliary web service') subparsers",
"ArgumentParser(description='Chitty auxiliary web service') subparsers = parser.add_subparsers(help='Available commands') run_parser = subparsers.add_parser('run', help='Launch the",
"ArgumentParser, Namespace from dotenv import find_dotenv, load_dotenv from werkzeug import run_simple def parse_args()",
"help='IP address to bind to' ) run_parser.add_argument( '-p', '--port', type=int, default=5001, help='port number",
"default='127.0.0.1', help='IP address to bind to' ) run_parser.add_argument( '-p', '--port', type=int, default=5001, help='port",
"= ArgumentParser(description='Chitty auxiliary web service') subparsers = parser.add_subparsers(help='Available commands') run_parser = subparsers.add_parser('run', help='Launch",
"load_dotenv(find_dotenv()) from .app import make_app application = make_app() opts = parse_args() run_simple(opts.host, opts.port,",
"run_simple def parse_args() -> Namespace: parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter} parser = ArgumentParser(description='Chitty auxiliary",
"service') subparsers = parser.add_subparsers(help='Available commands') run_parser = subparsers.add_parser('run', help='Launch the server', **parser_kw) run_parser.add_argument(",
"'-p', '--port', type=int, default=5001, help='port number to bind to' ) return parser.parse_args() def",
"bind to' ) run_parser.add_argument( '-p', '--port', type=int, default=5001, help='port number to bind to'",
"parser.parse_args() def main(): load_dotenv(find_dotenv()) from .app import make_app application = make_app() opts =",
"= subparsers.add_parser('run', help='Launch the server', **parser_kw) run_parser.add_argument( '-H', '--host', default='127.0.0.1', help='IP address to",
"main(): load_dotenv(find_dotenv()) from .app import make_app application = make_app() opts = parse_args() run_simple(opts.host,",
"from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace from dotenv import find_dotenv, load_dotenv from werkzeug",
"import run_simple def parse_args() -> Namespace: parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter} parser = ArgumentParser(description='Chitty",
"to bind to' ) run_parser.add_argument( '-p', '--port', type=int, default=5001, help='port number to bind",
"run_parser.add_argument( '-p', '--port', type=int, default=5001, help='port number to bind to' ) return parser.parse_args()",
"server', **parser_kw) run_parser.add_argument( '-H', '--host', default='127.0.0.1', help='IP address to bind to' ) run_parser.add_argument(",
"type=int, default=5001, help='port number to bind to' ) return parser.parse_args() def main(): load_dotenv(find_dotenv())",
"run_parser = subparsers.add_parser('run', help='Launch the server', **parser_kw) run_parser.add_argument( '-H', '--host', default='127.0.0.1', help='IP address",
"subparsers = parser.add_subparsers(help='Available commands') run_parser = subparsers.add_parser('run', help='Launch the server', **parser_kw) run_parser.add_argument( '-H',",
"help='port number to bind to' ) return parser.parse_args() def main(): load_dotenv(find_dotenv()) from .app",
"address to bind to' ) run_parser.add_argument( '-p', '--port', type=int, default=5001, help='port number to",
"import find_dotenv, load_dotenv from werkzeug import run_simple def parse_args() -> Namespace: parser_kw =",
"def parse_args() -> Namespace: parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter} parser = ArgumentParser(description='Chitty auxiliary web",
"from werkzeug import run_simple def parse_args() -> Namespace: parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter} parser",
"= {'formatter_class': ArgumentDefaultsHelpFormatter} parser = ArgumentParser(description='Chitty auxiliary web service') subparsers = parser.add_subparsers(help='Available commands')",
"{'formatter_class': ArgumentDefaultsHelpFormatter} parser = ArgumentParser(description='Chitty auxiliary web service') subparsers = parser.add_subparsers(help='Available commands') run_parser",
"ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace from dotenv import find_dotenv, load_dotenv from werkzeug import run_simple def",
"web service') subparsers = parser.add_subparsers(help='Available commands') run_parser = subparsers.add_parser('run', help='Launch the server', **parser_kw)",
"from dotenv import find_dotenv, load_dotenv from werkzeug import run_simple def parse_args() -> Namespace:",
"def main(): load_dotenv(find_dotenv()) from .app import make_app application = make_app() opts = parse_args()",
"'--host', default='127.0.0.1', help='IP address to bind to' ) run_parser.add_argument( '-p', '--port', type=int, default=5001,",
"run_parser.add_argument( '-H', '--host', default='127.0.0.1', help='IP address to bind to' ) run_parser.add_argument( '-p', '--port',",
"return parser.parse_args() def main(): load_dotenv(find_dotenv()) from .app import make_app application = make_app() opts",
".app import make_app application = make_app() opts = parse_args() run_simple(opts.host, opts.port, application, use_reloader=True,",
"subparsers.add_parser('run', help='Launch the server', **parser_kw) run_parser.add_argument( '-H', '--host', default='127.0.0.1', help='IP address to bind",
"to bind to' ) return parser.parse_args() def main(): load_dotenv(find_dotenv()) from .app import make_app",
"import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace from dotenv import find_dotenv, load_dotenv from werkzeug import run_simple",
"number to bind to' ) return parser.parse_args() def main(): load_dotenv(find_dotenv()) from .app import",
"parser = ArgumentParser(description='Chitty auxiliary web service') subparsers = parser.add_subparsers(help='Available commands') run_parser = subparsers.add_parser('run',",
"dotenv import find_dotenv, load_dotenv from werkzeug import run_simple def parse_args() -> Namespace: parser_kw",
"to' ) run_parser.add_argument( '-p', '--port', type=int, default=5001, help='port number to bind to' )",
"ArgumentDefaultsHelpFormatter} parser = ArgumentParser(description='Chitty auxiliary web service') subparsers = parser.add_subparsers(help='Available commands') run_parser =",
"**parser_kw) run_parser.add_argument( '-H', '--host', default='127.0.0.1', help='IP address to bind to' ) run_parser.add_argument( '-p',",
"argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace from dotenv import find_dotenv, load_dotenv from werkzeug import",
"werkzeug import run_simple def parse_args() -> Namespace: parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter} parser =",
"parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter} parser = ArgumentParser(description='Chitty auxiliary web service') subparsers = parser.add_subparsers(help='Available",
"commands') run_parser = subparsers.add_parser('run', help='Launch the server', **parser_kw) run_parser.add_argument( '-H', '--host', default='127.0.0.1', help='IP"
] |
[
"HUC 12 # huc = \"020402060105\" # # parameter_code = \"00060\" # start_date",
"service, parameter_code, start_date, end_date): data_sites = [] sites_with_param = [] for site in",
"site in sites: try: site_data = hf.NWIS(site, service, start_date, end_date, parameterCd=parameter_code) site_data_df =",
"returned as USGS-[site_num] id_num = site_id.split('-')[1] # check if id is in the",
"json.dump(data, fl) return data def get_data_from_sites(sites, service, parameter_code, start_date, end_date): data_sites = []",
"site_data = hf.NWIS(site, service, start_date, end_date, parameterCd=parameter_code) site_data_df = site_data.get_data().df() data_sites.append(site_data_df) sites_with_param.append(site) print('got",
"import pandas as pd import json import ulmo import hydrofunctions as hf from",
"start_date, end_date, parameterCd=parameter_code) site_data_df = site_data.get_data().df() data_sites.append(site_data_df) sites_with_param.append(site) print('got data for {} ',",
"huc = \"020402060105\" # # parameter_code = \"00060\" # start_date = \"2018-01-01\" #",
"site_data # get all sites for a HUC 12 # huc = \"020402060105\"",
"pandas as pd import json import ulmo import hydrofunctions as hf from hydrofunctions.exceptions",
"= pd.concat(data_sites, axis=1) return data_from_sites_combined def get_data_for_huc(huc, param, start_date, end_date, service='dv'): huc_site_list, data",
"# is returned as USGS-[site_num] id_num = site_id.split('-')[1] # check if id is",
"for a HUC 12 # huc = \"020402060105\" # # parameter_code = \"00060\"",
"start_date, end_date): data_sites = [] sites_with_param = [] for site in sites: try:",
"site_data = get_data_from_sites(huc_site_list, service, param, start_date, end_date) return site_data # get all sites",
"site) except HydroNoDataError: print(\"no data for {}\".format(site)) data_from_sites_combined = pd.concat(data_sites, axis=1) return data_from_sites_combined",
"ulmo import hydrofunctions as hf from hydrofunctions.exceptions import HydroNoDataError from utils import get_sites_in_basin",
"as USGS-[site_num] id_num = site_id.split('-')[1] # check if id is in the list",
"\"00060\" # start_date = \"2018-01-01\" # end_date = \"2019-01-10\" # service = 'dv'",
"huc_site_list, data = get_sites_in_basin(huc) site_data = get_data_from_sites(huc_site_list, service, param, start_date, end_date) return site_data",
"= ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param) # get geojson just for sites with param sites_with_param_data =",
"parameterCd=parameter_code) site_data_df = site_data.get_data().df() data_sites.append(site_data_df) sites_with_param.append(site) print('got data for {} ', site) except",
"data_from_sites_combined = pd.concat(data_sites, axis=1) return data_from_sites_combined def get_data_for_huc(huc, param, start_date, end_date, service='dv'): huc_site_list,",
"file_name: with open(file_name, 'w') as fl: json.dump(data, fl) return data def get_data_from_sites(sites, service,",
"parameter_code, start_date, end_date): data_sites = [] sites_with_param = [] for site in sites:",
"get geojson just for sites with param sites_with_param_data = [] for site in",
"= \"020402060105\" # # parameter_code = \"00060\" # start_date = \"2018-01-01\" # end_date",
"get_sites_in_basin(huc) site_data = get_data_from_sites(huc_site_list, service, param, start_date, end_date) return site_data # get all",
"get_json_site_param(huc, param, file_name=None): # get all the sites in the huc sites_in_huc, data",
"in sites: try: site_data = hf.NWIS(site, service, start_date, end_date, parameterCd=parameter_code) site_data_df = site_data.get_data().df()",
"hydrofunctions.exceptions import HydroNoDataError from utils import get_sites_in_basin def get_json_site_param(huc, param, file_name=None): # get",
"in data['features']: site_id = site['properties']['identifier'] # is returned as USGS-[site_num] id_num = site_id.split('-')[1]",
"sites for a HUC 12 # huc = \"020402060105\" # # parameter_code =",
"the huc sites_in_huc, data = get_sites_in_basin(huc) # get all the sites in the",
"sites_with_param = [] for site in sites: try: site_data = hf.NWIS(site, service, start_date,",
"in the huc sites_in_huc, data = get_sites_in_basin(huc) # get all the sites in",
"site_id.split('-')[1] # check if id is in the list of sites with param",
"in the list of sites with param if id_num in sites_with_param.keys(): sites_with_param_data.append(site) data['features']",
"fl) return data def get_data_from_sites(sites, service, parameter_code, start_date, end_date): data_sites = [] sites_with_param",
"param, file_name=None): # get all the sites in the huc sites_in_huc, data =",
"= [] sites_with_param = [] for site in sites: try: site_data = hf.NWIS(site,",
"data_sites.append(site_data_df) sites_with_param.append(site) print('got data for {} ', site) except HydroNoDataError: print(\"no data for",
"parameter_code = \"00060\" # start_date = \"2018-01-01\" # end_date = \"2019-01-10\" # service",
"end_date) return site_data # get all sites for a HUC 12 # huc",
"def get_data_for_huc(huc, param, start_date, end_date, service='dv'): huc_site_list, data = get_sites_in_basin(huc) site_data = get_data_from_sites(huc_site_list,",
"service, param, start_date, end_date) return site_data # get all sites for a HUC",
"sites in the huc sites_in_huc, data = get_sites_in_basin(huc) # get all the sites",
"axis=1) return data_from_sites_combined def get_data_for_huc(huc, param, start_date, end_date, service='dv'): huc_site_list, data = get_sites_in_basin(huc)",
"data = get_sites_in_basin(huc) site_data = get_data_from_sites(huc_site_list, service, param, start_date, end_date) return site_data #",
"def get_data_from_sites(sites, service, parameter_code, start_date, end_date): data_sites = [] sites_with_param = [] for",
"site_id = site['properties']['identifier'] # is returned as USGS-[site_num] id_num = site_id.split('-')[1] # check",
"sites_with_param_data if file_name: with open(file_name, 'w') as fl: json.dump(data, fl) return data def",
"for {}\".format(site)) data_from_sites_combined = pd.concat(data_sites, axis=1) return data_from_sites_combined def get_data_for_huc(huc, param, start_date, end_date,",
"all the sites in the huc sites_in_huc, data = get_sites_in_basin(huc) # get all",
"[] for site in sites: try: site_data = hf.NWIS(site, service, start_date, end_date, parameterCd=parameter_code)",
"end_date, service='dv'): huc_site_list, data = get_sites_in_basin(huc) site_data = get_data_from_sites(huc_site_list, service, param, start_date, end_date)",
"with param if id_num in sites_with_param.keys(): sites_with_param_data.append(site) data['features'] = sites_with_param_data if file_name: with",
"in sites_with_param.keys(): sites_with_param_data.append(site) data['features'] = sites_with_param_data if file_name: with open(file_name, 'w') as fl:",
"data_from_sites_combined def get_data_for_huc(huc, param, start_date, end_date, service='dv'): huc_site_list, data = get_sites_in_basin(huc) site_data =",
"id_num in sites_with_param.keys(): sites_with_param_data.append(site) data['features'] = sites_with_param_data if file_name: with open(file_name, 'w') as",
"# get all sites for a HUC 12 # huc = \"020402060105\" #",
"= site_id.split('-')[1] # check if id is in the list of sites with",
"if file_name: with open(file_name, 'w') as fl: json.dump(data, fl) return data def get_data_from_sites(sites,",
"a HUC 12 # huc = \"020402060105\" # # parameter_code = \"00060\" #",
"= site_data.get_data().df() data_sites.append(site_data_df) sites_with_param.append(site) print('got data for {} ', site) except HydroNoDataError: print(\"no",
"json import ulmo import hydrofunctions as hf from hydrofunctions.exceptions import HydroNoDataError from utils",
"sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param) # get geojson just for sites with param sites_with_param_data",
"with param sites_with_param_data = [] for site in data['features']: site_id = site['properties']['identifier'] #",
"pd import json import ulmo import hydrofunctions as hf from hydrofunctions.exceptions import HydroNoDataError",
"= [] for site in data['features']: site_id = site['properties']['identifier'] # is returned as",
"import get_sites_in_basin def get_json_site_param(huc, param, file_name=None): # get all the sites in the",
"<gh_stars>0 # coding: utf-8 import pandas as pd import json import ulmo import",
"if id is in the list of sites with param if id_num in",
"[] for site in data['features']: site_id = site['properties']['identifier'] # is returned as USGS-[site_num]",
"site['properties']['identifier'] # is returned as USGS-[site_num] id_num = site_id.split('-')[1] # check if id",
"sites with param sites_with_param_data = [] for site in data['features']: site_id = site['properties']['identifier']",
"data def get_data_from_sites(sites, service, parameter_code, start_date, end_date): data_sites = [] sites_with_param = []",
"site_data_df = site_data.get_data().df() data_sites.append(site_data_df) sites_with_param.append(site) print('got data for {} ', site) except HydroNoDataError:",
"{} ', site) except HydroNoDataError: print(\"no data for {}\".format(site)) data_from_sites_combined = pd.concat(data_sites, axis=1)",
"from hydrofunctions.exceptions import HydroNoDataError from utils import get_sites_in_basin def get_json_site_param(huc, param, file_name=None): #",
"get all the sites in the huc that have param sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc,",
"sites_with_param.keys(): sites_with_param_data.append(site) data['features'] = sites_with_param_data if file_name: with open(file_name, 'w') as fl: json.dump(data,",
"service, start_date, end_date, parameterCd=parameter_code) site_data_df = site_data.get_data().df() data_sites.append(site_data_df) sites_with_param.append(site) print('got data for {}",
"get all sites for a HUC 12 # huc = \"020402060105\" # #",
"# # parameter_code = \"00060\" # start_date = \"2018-01-01\" # end_date = \"2019-01-10\"",
"is returned as USGS-[site_num] id_num = site_id.split('-')[1] # check if id is in",
"sites_with_param.append(site) print('got data for {} ', site) except HydroNoDataError: print(\"no data for {}\".format(site))",
"as fl: json.dump(data, fl) return data def get_data_from_sites(sites, service, parameter_code, start_date, end_date): data_sites",
"end_date): data_sites = [] sites_with_param = [] for site in sites: try: site_data",
"param, start_date, end_date, service='dv'): huc_site_list, data = get_sites_in_basin(huc) site_data = get_data_from_sites(huc_site_list, service, param,",
"that have param sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param) # get geojson just for sites",
"list of sites with param if id_num in sites_with_param.keys(): sites_with_param_data.append(site) data['features'] = sites_with_param_data",
"service='dv'): huc_site_list, data = get_sites_in_basin(huc) site_data = get_data_from_sites(huc_site_list, service, param, start_date, end_date) return",
"except HydroNoDataError: print(\"no data for {}\".format(site)) data_from_sites_combined = pd.concat(data_sites, axis=1) return data_from_sites_combined def",
"coding: utf-8 import pandas as pd import json import ulmo import hydrofunctions as",
"the sites in the huc sites_in_huc, data = get_sites_in_basin(huc) # get all the",
"start_date, end_date, service='dv'): huc_site_list, data = get_sites_in_basin(huc) site_data = get_data_from_sites(huc_site_list, service, param, start_date,",
"all sites for a HUC 12 # huc = \"020402060105\" # # parameter_code",
"in the huc that have param sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param) # get geojson",
"id_num = site_id.split('-')[1] # check if id is in the list of sites",
"param sites_with_param_data = [] for site in data['features']: site_id = site['properties']['identifier'] # is",
"get_sites_in_basin(huc) # get all the sites in the huc that have param sites_with_param",
"\"020402060105\" # # parameter_code = \"00060\" # start_date = \"2018-01-01\" # end_date =",
"check if id is in the list of sites with param if id_num",
"pd.concat(data_sites, axis=1) return data_from_sites_combined def get_data_for_huc(huc, param, start_date, end_date, service='dv'): huc_site_list, data =",
"sites in the huc that have param sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param) # get",
"with open(file_name, 'w') as fl: json.dump(data, fl) return data def get_data_from_sites(sites, service, parameter_code,",
"param, start_date, end_date) return site_data # get all sites for a HUC 12",
"utf-8 import pandas as pd import json import ulmo import hydrofunctions as hf",
"if id_num in sites_with_param.keys(): sites_with_param_data.append(site) data['features'] = sites_with_param_data if file_name: with open(file_name, 'w')",
"just for sites with param sites_with_param_data = [] for site in data['features']: site_id",
"12 # huc = \"020402060105\" # # parameter_code = \"00060\" # start_date =",
"for {} ', site) except HydroNoDataError: print(\"no data for {}\".format(site)) data_from_sites_combined = pd.concat(data_sites,",
"utils import get_sites_in_basin def get_json_site_param(huc, param, file_name=None): # get all the sites in",
"= sites_with_param_data if file_name: with open(file_name, 'w') as fl: json.dump(data, fl) return data",
"all the sites in the huc that have param sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param)",
"huc that have param sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param) # get geojson just for",
"= hf.NWIS(site, service, start_date, end_date, parameterCd=parameter_code) site_data_df = site_data.get_data().df() data_sites.append(site_data_df) sites_with_param.append(site) print('got data",
"data = get_sites_in_basin(huc) # get all the sites in the huc that have",
"return data def get_data_from_sites(sites, service, parameter_code, start_date, end_date): data_sites = [] sites_with_param =",
"', site) except HydroNoDataError: print(\"no data for {}\".format(site)) data_from_sites_combined = pd.concat(data_sites, axis=1) return",
"print(\"no data for {}\".format(site)) data_from_sites_combined = pd.concat(data_sites, axis=1) return data_from_sites_combined def get_data_for_huc(huc, param,",
"# coding: utf-8 import pandas as pd import json import ulmo import hydrofunctions",
"get_sites_in_basin def get_json_site_param(huc, param, file_name=None): # get all the sites in the huc",
"id is in the list of sites with param if id_num in sites_with_param.keys():",
"site_data.get_data().df() data_sites.append(site_data_df) sites_with_param.append(site) print('got data for {} ', site) except HydroNoDataError: print(\"no data",
"from utils import get_sites_in_basin def get_json_site_param(huc, param, file_name=None): # get all the sites",
"data for {} ', site) except HydroNoDataError: print(\"no data for {}\".format(site)) data_from_sites_combined =",
"is in the list of sites with param if id_num in sites_with_param.keys(): sites_with_param_data.append(site)",
"try: site_data = hf.NWIS(site, service, start_date, end_date, parameterCd=parameter_code) site_data_df = site_data.get_data().df() data_sites.append(site_data_df) sites_with_param.append(site)",
"hydrofunctions as hf from hydrofunctions.exceptions import HydroNoDataError from utils import get_sites_in_basin def get_json_site_param(huc,",
"HydroNoDataError from utils import get_sites_in_basin def get_json_site_param(huc, param, file_name=None): # get all the",
"for site in data['features']: site_id = site['properties']['identifier'] # is returned as USGS-[site_num] id_num",
"# parameter_code = \"00060\" # start_date = \"2018-01-01\" # end_date = \"2019-01-10\" #",
"start_date, end_date) return site_data # get all sites for a HUC 12 #",
"print('got data for {} ', site) except HydroNoDataError: print(\"no data for {}\".format(site)) data_from_sites_combined",
"get all the sites in the huc sites_in_huc, data = get_sites_in_basin(huc) # get",
"return data_from_sites_combined def get_data_for_huc(huc, param, start_date, end_date, service='dv'): huc_site_list, data = get_sites_in_basin(huc) site_data",
"return site_data # get all sites for a HUC 12 # huc =",
"= get_sites_in_basin(huc) site_data = get_data_from_sites(huc_site_list, service, param, start_date, end_date) return site_data # get",
"hf.NWIS(site, service, start_date, end_date, parameterCd=parameter_code) site_data_df = site_data.get_data().df() data_sites.append(site_data_df) sites_with_param.append(site) print('got data for",
"# check if id is in the list of sites with param if",
"def get_json_site_param(huc, param, file_name=None): # get all the sites in the huc sites_in_huc,",
"ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param) # get geojson just for sites with param sites_with_param_data = []",
"site in data['features']: site_id = site['properties']['identifier'] # is returned as USGS-[site_num] id_num =",
"the huc that have param sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param) # get geojson just",
"# get all the sites in the huc sites_in_huc, data = get_sites_in_basin(huc) #",
"data['features']: site_id = site['properties']['identifier'] # is returned as USGS-[site_num] id_num = site_id.split('-')[1] #",
"data['features'] = sites_with_param_data if file_name: with open(file_name, 'w') as fl: json.dump(data, fl) return",
"'w') as fl: json.dump(data, fl) return data def get_data_from_sites(sites, service, parameter_code, start_date, end_date):",
"fl: json.dump(data, fl) return data def get_data_from_sites(sites, service, parameter_code, start_date, end_date): data_sites =",
"{}\".format(site)) data_from_sites_combined = pd.concat(data_sites, axis=1) return data_from_sites_combined def get_data_for_huc(huc, param, start_date, end_date, service='dv'):",
"# get geojson just for sites with param sites_with_param_data = [] for site",
"# get all the sites in the huc that have param sites_with_param =",
"param sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param) # get geojson just for sites with param",
"sites_with_param_data = [] for site in data['features']: site_id = site['properties']['identifier'] # is returned",
"as hf from hydrofunctions.exceptions import HydroNoDataError from utils import get_sites_in_basin def get_json_site_param(huc, param,",
"[] sites_with_param = [] for site in sites: try: site_data = hf.NWIS(site, service,",
"for site in sites: try: site_data = hf.NWIS(site, service, start_date, end_date, parameterCd=parameter_code) site_data_df",
"= get_sites_in_basin(huc) # get all the sites in the huc that have param",
"parameter_code=param) # get geojson just for sites with param sites_with_param_data = [] for",
"file_name=None): # get all the sites in the huc sites_in_huc, data = get_sites_in_basin(huc)",
"huc sites_in_huc, data = get_sites_in_basin(huc) # get all the sites in the huc",
"get_data_for_huc(huc, param, start_date, end_date, service='dv'): huc_site_list, data = get_sites_in_basin(huc) site_data = get_data_from_sites(huc_site_list, service,",
"import json import ulmo import hydrofunctions as hf from hydrofunctions.exceptions import HydroNoDataError from",
"the sites in the huc that have param sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param) #",
"as pd import json import ulmo import hydrofunctions as hf from hydrofunctions.exceptions import",
"import ulmo import hydrofunctions as hf from hydrofunctions.exceptions import HydroNoDataError from utils import",
"param if id_num in sites_with_param.keys(): sites_with_param_data.append(site) data['features'] = sites_with_param_data if file_name: with open(file_name,",
"sites_in_huc, data = get_sites_in_basin(huc) # get all the sites in the huc that",
"= [] for site in sites: try: site_data = hf.NWIS(site, service, start_date, end_date,",
"sites: try: site_data = hf.NWIS(site, service, start_date, end_date, parameterCd=parameter_code) site_data_df = site_data.get_data().df() data_sites.append(site_data_df)",
"hf from hydrofunctions.exceptions import HydroNoDataError from utils import get_sites_in_basin def get_json_site_param(huc, param, file_name=None):",
"sites with param if id_num in sites_with_param.keys(): sites_with_param_data.append(site) data['features'] = sites_with_param_data if file_name:",
"geojson just for sites with param sites_with_param_data = [] for site in data['features']:",
"= site['properties']['identifier'] # is returned as USGS-[site_num] id_num = site_id.split('-')[1] # check if",
"= \"00060\" # start_date = \"2018-01-01\" # end_date = \"2019-01-10\" # service =",
"sites_with_param_data.append(site) data['features'] = sites_with_param_data if file_name: with open(file_name, 'w') as fl: json.dump(data, fl)",
"= get_data_from_sites(huc_site_list, service, param, start_date, end_date) return site_data # get all sites for",
"for sites with param sites_with_param_data = [] for site in data['features']: site_id =",
"import hydrofunctions as hf from hydrofunctions.exceptions import HydroNoDataError from utils import get_sites_in_basin def",
"have param sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc, parameter_code=param) # get geojson just for sites with",
"open(file_name, 'w') as fl: json.dump(data, fl) return data def get_data_from_sites(sites, service, parameter_code, start_date,",
"HydroNoDataError: print(\"no data for {}\".format(site)) data_from_sites_combined = pd.concat(data_sites, axis=1) return data_from_sites_combined def get_data_for_huc(huc,",
"the list of sites with param if id_num in sites_with_param.keys(): sites_with_param_data.append(site) data['features'] =",
"data_sites = [] sites_with_param = [] for site in sites: try: site_data =",
"get_data_from_sites(huc_site_list, service, param, start_date, end_date) return site_data # get all sites for a",
"of sites with param if id_num in sites_with_param.keys(): sites_with_param_data.append(site) data['features'] = sites_with_param_data if",
"USGS-[site_num] id_num = site_id.split('-')[1] # check if id is in the list of",
"get_data_from_sites(sites, service, parameter_code, start_date, end_date): data_sites = [] sites_with_param = [] for site",
"end_date, parameterCd=parameter_code) site_data_df = site_data.get_data().df() data_sites.append(site_data_df) sites_with_param.append(site) print('got data for {} ', site)",
"import HydroNoDataError from utils import get_sites_in_basin def get_json_site_param(huc, param, file_name=None): # get all",
"data for {}\".format(site)) data_from_sites_combined = pd.concat(data_sites, axis=1) return data_from_sites_combined def get_data_for_huc(huc, param, start_date,",
"# huc = \"020402060105\" # # parameter_code = \"00060\" # start_date = \"2018-01-01\""
] |
[
"dependencies = [ ('usermanager', '0007_staffprofile_added_by'), ] operations = [ migrations.AddField( model_name='clientprofile', name='brand', field=models.ForeignKey(blank=True,",
"by Django 3.0.7 on 2020-07-10 19:45 from django.db import migrations, models import django.db.models.deletion",
"name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'), ), migrations.AddField( model_name='staffprofile', name='brand', field=models.ForeignKey(blank=True, null=True,",
"'0007_staffprofile_added_by'), ] operations = [ migrations.AddField( model_name='clientprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand',",
"[ migrations.AddField( model_name='clientprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'), ), migrations.AddField( model_name='staffprofile',",
"django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('usermanager', '0007_staffprofile_added_by'), ] operations = [ migrations.AddField(",
"related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'), ), migrations.AddField( model_name='staffprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='staff_profile', to='usermanager.Brand', verbose_name='Brand'),",
"3.0.7 on 2020-07-10 19:45 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):",
"migrations.AddField( model_name='clientprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'), ), migrations.AddField( model_name='staffprofile', name='brand',",
"class Migration(migrations.Migration): dependencies = [ ('usermanager', '0007_staffprofile_added_by'), ] operations = [ migrations.AddField( model_name='clientprofile',",
"operations = [ migrations.AddField( model_name='clientprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'), ),",
"on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'), ), migrations.AddField( model_name='staffprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='staff_profile', to='usermanager.Brand',",
"to='usermanager.Brand', verbose_name='Brand'), ), migrations.AddField( model_name='staffprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='staff_profile', to='usermanager.Brand', verbose_name='Brand'), ),",
"model_name='clientprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'), ), migrations.AddField( model_name='staffprofile', name='brand', field=models.ForeignKey(blank=True,",
"field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'), ), migrations.AddField( model_name='staffprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,",
"= [ migrations.AddField( model_name='clientprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'), ), migrations.AddField(",
"# Generated by Django 3.0.7 on 2020-07-10 19:45 from django.db import migrations, models",
"null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'), ), migrations.AddField( model_name='staffprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='staff_profile',",
"= [ ('usermanager', '0007_staffprofile_added_by'), ] operations = [ migrations.AddField( model_name='clientprofile', name='brand', field=models.ForeignKey(blank=True, null=True,",
"migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('usermanager', '0007_staffprofile_added_by'), ] operations",
"verbose_name='Brand'), ), migrations.AddField( model_name='staffprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='staff_profile', to='usermanager.Brand', verbose_name='Brand'), ), ]",
"from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('usermanager',",
"import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('usermanager', '0007_staffprofile_added_by'), ]",
"2020-07-10 19:45 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =",
"import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('usermanager', '0007_staffprofile_added_by'), ] operations = [",
"19:45 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [",
"] operations = [ migrations.AddField( model_name='clientprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'),",
"[ ('usermanager', '0007_staffprofile_added_by'), ] operations = [ migrations.AddField( model_name='clientprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,",
"django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('usermanager', '0007_staffprofile_added_by'),",
"('usermanager', '0007_staffprofile_added_by'), ] operations = [ migrations.AddField( model_name='clientprofile', name='brand', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile',",
"Generated by Django 3.0.7 on 2020-07-10 19:45 from django.db import migrations, models import",
"Migration(migrations.Migration): dependencies = [ ('usermanager', '0007_staffprofile_added_by'), ] operations = [ migrations.AddField( model_name='clientprofile', name='brand',",
"on 2020-07-10 19:45 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies",
"models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('usermanager', '0007_staffprofile_added_by'), ] operations =",
"Django 3.0.7 on 2020-07-10 19:45 from django.db import migrations, models import django.db.models.deletion class",
"<reponame>aakashbajaj/Youngun-Campaign-Tracking<filename>backend/youngun/youngun/apps/usermanager/migrations/0008_auto_20200710_1945.py # Generated by Django 3.0.7 on 2020-07-10 19:45 from django.db import migrations,"
] |
[
"id = None # type: \"int64\" added_date = None # type: \"int32\" sizes",
"<gh_stars>10-100 from ..factory import Type class userProfilePhoto(Type): id = None # type: \"int64\"",
"import Type class userProfilePhoto(Type): id = None # type: \"int64\" added_date = None",
"= None # type: \"int64\" added_date = None # type: \"int32\" sizes =",
"from ..factory import Type class userProfilePhoto(Type): id = None # type: \"int64\" added_date",
"None # type: \"int64\" added_date = None # type: \"int32\" sizes = None",
"type: \"int64\" added_date = None # type: \"int32\" sizes = None # type:",
"userProfilePhoto(Type): id = None # type: \"int64\" added_date = None # type: \"int32\"",
"..factory import Type class userProfilePhoto(Type): id = None # type: \"int64\" added_date =",
"Type class userProfilePhoto(Type): id = None # type: \"int64\" added_date = None #",
"\"int64\" added_date = None # type: \"int32\" sizes = None # type: \"vector<photoSize>\"",
"# type: \"int64\" added_date = None # type: \"int32\" sizes = None #",
"class userProfilePhoto(Type): id = None # type: \"int64\" added_date = None # type:"
] |
[
"in range (i+1, 10): if (arr[i] == arr[j]): arr[j] = -1 print(arr[i], end",
"for j in range (i+1, 10): if (arr[i] == arr[j]): arr[j] = -1",
"if (arr[i]!= -1): for j in range (i+1, 10): if (arr[i] == arr[j]):",
"arr = [ 5,3,5,2,41,4,3,1,4,4 ] for i in range(10): if (arr[i]!= -1): for",
"-1): for j in range (i+1, 10): if (arr[i] == arr[j]): arr[j] =",
"] for i in range(10): if (arr[i]!= -1): for j in range (i+1,",
"[ 5,3,5,2,41,4,3,1,4,4 ] for i in range(10): if (arr[i]!= -1): for j in",
"(arr[i]!= -1): for j in range (i+1, 10): if (arr[i] == arr[j]): arr[j]",
"i in range(10): if (arr[i]!= -1): for j in range (i+1, 10): if",
"<gh_stars>1-10 arr = [ 5,3,5,2,41,4,3,1,4,4 ] for i in range(10): if (arr[i]!= -1):",
"10): if (arr[i] == arr[j]): arr[j] = -1 print(arr[i], end = ' ')",
"for i in range(10): if (arr[i]!= -1): for j in range (i+1, 10):",
"j in range (i+1, 10): if (arr[i] == arr[j]): arr[j] = -1 print(arr[i],",
"range (i+1, 10): if (arr[i] == arr[j]): arr[j] = -1 print(arr[i], end =",
"in range(10): if (arr[i]!= -1): for j in range (i+1, 10): if (arr[i]",
"range(10): if (arr[i]!= -1): for j in range (i+1, 10): if (arr[i] ==",
"= [ 5,3,5,2,41,4,3,1,4,4 ] for i in range(10): if (arr[i]!= -1): for j",
"5,3,5,2,41,4,3,1,4,4 ] for i in range(10): if (arr[i]!= -1): for j in range",
"(i+1, 10): if (arr[i] == arr[j]): arr[j] = -1 print(arr[i], end = '"
] |
[
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"Mozilla/5.0 (X11; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0\", \"accept\": \"application/json\", } class FanboxExtractor(Extractor): @property",
"KIND, either express or implied. # See the License for the specific language",
"Unless required by applicable law or agreed to in writing, software # distributed",
"under the License. import json import structlog from rssfly.extractor.common import Chapter, Comic, Context,",
"# See the License for the specific language governing permissions and # limitations",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"Comic: url = f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching from Fanbox API\", url=url) headers = _DEFAULT_HEADERS.copy() headers[\"referer\"]",
"may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # #",
"License. # You may obtain a copy of the License at # #",
"Gecko/20100101 Firefox/92.0\", \"accept\": \"application/json\", } class FanboxExtractor(Extractor): @property def name(self): return \"pixiv_fanbox\" @property",
"# limitations under the License. import json import structlog from rssfly.extractor.common import Chapter,",
"response = json.loads(raw_bytes) chapters = {} for chapter in response[\"body\"][\"items\"]: chapter_id = \"{:012}\".format(int(chapter[\"id\"]))",
"specific language governing permissions and # limitations under the License. import json import",
"@property def name(self): return \"pixiv_fanbox\" @property def publisher(self): return \"Fanbox\" def extract(self, context:",
"law or agreed to in writing, software # distributed under the License is",
"the License for the specific language governing permissions and # limitations under the",
"def publisher(self): return \"Fanbox\" def extract(self, context: Context, comic_id: str) -> Comic: url",
"return \"Fanbox\" def extract(self, context: Context, comic_id: str) -> Comic: url = f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\"",
"compliance with the License. # You may obtain a copy of the License",
"Comic, Context, Extractor logger = structlog.get_logger(__name__) _DEFAULT_HEADERS = { \"user-agent\": \"User-Agent: Mozilla/5.0 (X11;",
"json.loads(raw_bytes) chapters = {} for chapter in response[\"body\"][\"items\"]: chapter_id = \"{:012}\".format(int(chapter[\"id\"])) chapter_title =",
"chapter[\"title\"] chapter_url = f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" # Deduplicate by URL chapters[chapter_url] = Chapter( chapter_id=chapter_id, name=chapter_title,",
"logger = structlog.get_logger(__name__) _DEFAULT_HEADERS = { \"user-agent\": \"User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:92.0)",
"= Chapter( chapter_id=chapter_id, name=chapter_title, url=chapter_url, ) chapter_list = list( sorted(chapters.values(), key=lambda chapter: chapter.chapter_id)",
"chapter_id=chapter_id, name=chapter_title, url=chapter_url, ) chapter_list = list( sorted(chapters.values(), key=lambda chapter: chapter.chapter_id) ) return",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"governing permissions and # limitations under the License. import json import structlog from",
"this file except in compliance with the License. # You may obtain a",
"comic_id: str) -> Comic: url = f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching from Fanbox API\", url=url) headers",
"chapter_id = \"{:012}\".format(int(chapter[\"id\"])) chapter_title = chapter[\"title\"] chapter_url = f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" # Deduplicate by URL",
"import Chapter, Comic, Context, Extractor logger = structlog.get_logger(__name__) _DEFAULT_HEADERS = { \"user-agent\": \"User-Agent:",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"] = headers[\"referer\"] raw_bytes = context.get_bytes(url, headers=headers) response = json.loads(raw_bytes) chapters =",
"you may not use this file except in compliance with the License. #",
"for the specific language governing permissions and # limitations under the License. import",
"Firefox/92.0\", \"accept\": \"application/json\", } class FanboxExtractor(Extractor): @property def name(self): return \"pixiv_fanbox\" @property def",
"\"pixiv_fanbox\" @property def publisher(self): return \"Fanbox\" def extract(self, context: Context, comic_id: str) ->",
"# You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0",
"rssfly.extractor.common import Chapter, Comic, Context, Extractor logger = structlog.get_logger(__name__) _DEFAULT_HEADERS = { \"user-agent\":",
"# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"structlog.get_logger(__name__) _DEFAULT_HEADERS = { \"user-agent\": \"User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0\",",
"\"user-agent\": \"User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0\", \"accept\": \"application/json\", } class",
"Fanbox API\", url=url) headers = _DEFAULT_HEADERS.copy() headers[\"referer\"] = f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"] = headers[\"referer\"] raw_bytes",
"ANY KIND, either express or implied. # See the License for the specific",
"raw_bytes = context.get_bytes(url, headers=headers) response = json.loads(raw_bytes) chapters = {} for chapter in",
"headers = _DEFAULT_HEADERS.copy() headers[\"referer\"] = f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"] = headers[\"referer\"] raw_bytes = context.get_bytes(url, headers=headers)",
"chapter_url = f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" # Deduplicate by URL chapters[chapter_url] = Chapter( chapter_id=chapter_id, name=chapter_title, url=chapter_url,",
"# Deduplicate by URL chapters[chapter_url] = Chapter( chapter_id=chapter_id, name=chapter_title, url=chapter_url, ) chapter_list =",
"def extract(self, context: Context, comic_id: str) -> Comic: url = f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching from",
"in compliance with the License. # You may obtain a copy of the",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"use this file except in compliance with the License. # You may obtain",
"chapter.chapter_id) ) return Comic( publisher=self.publisher, comic_id=comic_id, name=response[\"body\"][\"items\"][0][\"user\"][\"name\"] + \"’s Fanbox\", url=f\"https://{comic_id}.fanbox.cc\", chapters=chapter_list, )",
"not use this file except in compliance with the License. # You may",
"License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"URL chapters[chapter_url] = Chapter( chapter_id=chapter_id, name=chapter_title, url=chapter_url, ) chapter_list = list( sorted(chapters.values(), key=lambda",
"str) -> Comic: url = f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching from Fanbox API\", url=url) headers =",
"See the License for the specific language governing permissions and # limitations under",
"You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 #",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"License, Version 2.0 (the \"License\"); # you may not use this file except",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"name(self): return \"pixiv_fanbox\" @property def publisher(self): return \"Fanbox\" def extract(self, context: Context, comic_id:",
"f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" # Deduplicate by URL chapters[chapter_url] = Chapter( chapter_id=chapter_id, name=chapter_title, url=chapter_url, ) chapter_list",
"limitations under the License. import json import structlog from rssfly.extractor.common import Chapter, Comic,",
"sorted(chapters.values(), key=lambda chapter: chapter.chapter_id) ) return Comic( publisher=self.publisher, comic_id=comic_id, name=response[\"body\"][\"items\"][0][\"user\"][\"name\"] + \"’s Fanbox\",",
"copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0\", \"accept\": \"application/json\", } class FanboxExtractor(Extractor): @property def name(self):",
"= f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" # Deduplicate by URL chapters[chapter_url] = Chapter( chapter_id=chapter_id, name=chapter_title, url=chapter_url, )",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"and # limitations under the License. import json import structlog from rssfly.extractor.common import",
"= f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching from Fanbox API\", url=url) headers = _DEFAULT_HEADERS.copy() headers[\"referer\"] = f\"https://{comic_id}.fanbox.cc\"",
"in response[\"body\"][\"items\"]: chapter_id = \"{:012}\".format(int(chapter[\"id\"])) chapter_title = chapter[\"title\"] chapter_url = f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" # Deduplicate",
"OF ANY KIND, either express or implied. # See the License for the",
"= {} for chapter in response[\"body\"][\"items\"]: chapter_id = \"{:012}\".format(int(chapter[\"id\"])) chapter_title = chapter[\"title\"] chapter_url",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"# # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"# you may not use this file except in compliance with the License.",
"of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"\"application/json\", } class FanboxExtractor(Extractor): @property def name(self): return \"pixiv_fanbox\" @property def publisher(self): return",
"headers[\"referer\"] raw_bytes = context.get_bytes(url, headers=headers) response = json.loads(raw_bytes) chapters = {} for chapter",
"2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"agreed to in writing, software # distributed under the License is distributed on",
"context: Context, comic_id: str) -> Comic: url = f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching from Fanbox API\",",
"f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching from Fanbox API\", url=url) headers = _DEFAULT_HEADERS.copy() headers[\"referer\"] = f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"]",
"FanboxExtractor(Extractor): @property def name(self): return \"pixiv_fanbox\" @property def publisher(self): return \"Fanbox\" def extract(self,",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"from Fanbox API\", url=url) headers = _DEFAULT_HEADERS.copy() headers[\"referer\"] = f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"] = headers[\"referer\"]",
"context.get_bytes(url, headers=headers) response = json.loads(raw_bytes) chapters = {} for chapter in response[\"body\"][\"items\"]: chapter_id",
"-> Comic: url = f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching from Fanbox API\", url=url) headers = _DEFAULT_HEADERS.copy()",
"obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"(the \"License\"); # you may not use this file except in compliance with",
"} class FanboxExtractor(Extractor): @property def name(self): return \"pixiv_fanbox\" @property def publisher(self): return \"Fanbox\"",
"name=chapter_title, url=chapter_url, ) chapter_list = list( sorted(chapters.values(), key=lambda chapter: chapter.chapter_id) ) return Comic(",
") chapter_list = list( sorted(chapters.values(), key=lambda chapter: chapter.chapter_id) ) return Comic( publisher=self.publisher, comic_id=comic_id,",
"logger.info(\"Fetching from Fanbox API\", url=url) headers = _DEFAULT_HEADERS.copy() headers[\"referer\"] = f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"] =",
"# # Unless required by applicable law or agreed to in writing, software",
"License. import json import structlog from rssfly.extractor.common import Chapter, Comic, Context, Extractor logger",
"\"User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0\", \"accept\": \"application/json\", } class FanboxExtractor(Extractor):",
"headers=headers) response = json.loads(raw_bytes) chapters = {} for chapter in response[\"body\"][\"items\"]: chapter_id =",
"express or implied. # See the License for the specific language governing permissions",
"chapters[chapter_url] = Chapter( chapter_id=chapter_id, name=chapter_title, url=chapter_url, ) chapter_list = list( sorted(chapters.values(), key=lambda chapter:",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"# Unless required by applicable law or agreed to in writing, software #",
"except in compliance with the License. # You may obtain a copy of",
"= structlog.get_logger(__name__) _DEFAULT_HEADERS = { \"user-agent\": \"User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:92.0) Gecko/20100101",
"Chapter, Comic, Context, Extractor logger = structlog.get_logger(__name__) _DEFAULT_HEADERS = { \"user-agent\": \"User-Agent: Mozilla/5.0",
"by applicable law or agreed to in writing, software # distributed under the",
"the License. import json import structlog from rssfly.extractor.common import Chapter, Comic, Context, Extractor",
"language governing permissions and # limitations under the License. import json import structlog",
"\"Fanbox\" def extract(self, context: Context, comic_id: str) -> Comic: url = f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching",
"by URL chapters[chapter_url] = Chapter( chapter_id=chapter_id, name=chapter_title, url=chapter_url, ) chapter_list = list( sorted(chapters.values(),",
"@property def publisher(self): return \"Fanbox\" def extract(self, context: Context, comic_id: str) -> Comic:",
"chapter_title = chapter[\"title\"] chapter_url = f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" # Deduplicate by URL chapters[chapter_url] = Chapter(",
"either express or implied. # See the License for the specific language governing",
"= _DEFAULT_HEADERS.copy() headers[\"referer\"] = f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"] = headers[\"referer\"] raw_bytes = context.get_bytes(url, headers=headers) response",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"url=url) headers = _DEFAULT_HEADERS.copy() headers[\"referer\"] = f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"] = headers[\"referer\"] raw_bytes = context.get_bytes(url,",
"= context.get_bytes(url, headers=headers) response = json.loads(raw_bytes) chapters = {} for chapter in response[\"body\"][\"items\"]:",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"= list( sorted(chapters.values(), key=lambda chapter: chapter.chapter_id) ) return Comic( publisher=self.publisher, comic_id=comic_id, name=response[\"body\"][\"items\"][0][\"user\"][\"name\"] +",
"at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"headers[\"referer\"] = f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"] = headers[\"referer\"] raw_bytes = context.get_bytes(url, headers=headers) response = json.loads(raw_bytes)",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"= { \"user-agent\": \"User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0\", \"accept\": \"application/json\",",
"url = f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching from Fanbox API\", url=url) headers = _DEFAULT_HEADERS.copy() headers[\"referer\"] =",
"\"{:012}\".format(int(chapter[\"id\"])) chapter_title = chapter[\"title\"] chapter_url = f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" # Deduplicate by URL chapters[chapter_url] =",
"file except in compliance with the License. # You may obtain a copy",
"{ \"user-agent\": \"User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0\", \"accept\": \"application/json\", }",
"for chapter in response[\"body\"][\"items\"]: chapter_id = \"{:012}\".format(int(chapter[\"id\"])) chapter_title = chapter[\"title\"] chapter_url = f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\"",
"# Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0",
"Extractor logger = structlog.get_logger(__name__) _DEFAULT_HEADERS = { \"user-agent\": \"User-Agent: Mozilla/5.0 (X11; Linux x86_64;",
"permissions and # limitations under the License. import json import structlog from rssfly.extractor.common",
"def name(self): return \"pixiv_fanbox\" @property def publisher(self): return \"Fanbox\" def extract(self, context: Context,",
"<reponame>lidavidm/rssfly # Copyright 2021 <NAME> # # Licensed under the Apache License, Version",
"API\", url=url) headers = _DEFAULT_HEADERS.copy() headers[\"referer\"] = f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"] = headers[\"referer\"] raw_bytes =",
"under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"x86_64; rv:92.0) Gecko/20100101 Firefox/92.0\", \"accept\": \"application/json\", } class FanboxExtractor(Extractor): @property def name(self): return",
"class FanboxExtractor(Extractor): @property def name(self): return \"pixiv_fanbox\" @property def publisher(self): return \"Fanbox\" def",
"License for the specific language governing permissions and # limitations under the License.",
"return \"pixiv_fanbox\" @property def publisher(self): return \"Fanbox\" def extract(self, context: Context, comic_id: str)",
"a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"key=lambda chapter: chapter.chapter_id) ) return Comic( publisher=self.publisher, comic_id=comic_id, name=response[\"body\"][\"items\"][0][\"user\"][\"name\"] + \"’s Fanbox\", url=f\"https://{comic_id}.fanbox.cc\",",
"the License. # You may obtain a copy of the License at #",
"\"accept\": \"application/json\", } class FanboxExtractor(Extractor): @property def name(self): return \"pixiv_fanbox\" @property def publisher(self):",
"{} for chapter in response[\"body\"][\"items\"]: chapter_id = \"{:012}\".format(int(chapter[\"id\"])) chapter_title = chapter[\"title\"] chapter_url =",
"to in writing, software # distributed under the License is distributed on an",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"the specific language governing permissions and # limitations under the License. import json",
"chapters = {} for chapter in response[\"body\"][\"items\"]: chapter_id = \"{:012}\".format(int(chapter[\"id\"])) chapter_title = chapter[\"title\"]",
"Context, comic_id: str) -> Comic: url = f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching from Fanbox API\", url=url)",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"implied. # See the License for the specific language governing permissions and #",
"\"License\"); # you may not use this file except in compliance with the",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"= chapter[\"title\"] chapter_url = f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" # Deduplicate by URL chapters[chapter_url] = Chapter( chapter_id=chapter_id,",
"(X11; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0\", \"accept\": \"application/json\", } class FanboxExtractor(Extractor): @property def",
"required by applicable law or agreed to in writing, software # distributed under",
"Deduplicate by URL chapters[chapter_url] = Chapter( chapter_id=chapter_id, name=chapter_title, url=chapter_url, ) chapter_list = list(",
"_DEFAULT_HEADERS = { \"user-agent\": \"User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0\", \"accept\":",
"rv:92.0) Gecko/20100101 Firefox/92.0\", \"accept\": \"application/json\", } class FanboxExtractor(Extractor): @property def name(self): return \"pixiv_fanbox\"",
"url=chapter_url, ) chapter_list = list( sorted(chapters.values(), key=lambda chapter: chapter.chapter_id) ) return Comic( publisher=self.publisher,",
"_DEFAULT_HEADERS.copy() headers[\"referer\"] = f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"] = headers[\"referer\"] raw_bytes = context.get_bytes(url, headers=headers) response =",
"import json import structlog from rssfly.extractor.common import Chapter, Comic, Context, Extractor logger =",
"applicable law or agreed to in writing, software # distributed under the License",
"from rssfly.extractor.common import Chapter, Comic, Context, Extractor logger = structlog.get_logger(__name__) _DEFAULT_HEADERS = {",
"response[\"body\"][\"items\"]: chapter_id = \"{:012}\".format(int(chapter[\"id\"])) chapter_title = chapter[\"title\"] chapter_url = f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" # Deduplicate by",
"import structlog from rssfly.extractor.common import Chapter, Comic, Context, Extractor logger = structlog.get_logger(__name__) _DEFAULT_HEADERS",
"= f\"https://{comic_id}.fanbox.cc\" headers[\"origin\"] = headers[\"referer\"] raw_bytes = context.get_bytes(url, headers=headers) response = json.loads(raw_bytes) chapters",
"= headers[\"referer\"] raw_bytes = context.get_bytes(url, headers=headers) response = json.loads(raw_bytes) chapters = {} for",
"chapter in response[\"body\"][\"items\"]: chapter_id = \"{:012}\".format(int(chapter[\"id\"])) chapter_title = chapter[\"title\"] chapter_url = f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" #",
"or agreed to in writing, software # distributed under the License is distributed",
"headers[\"origin\"] = headers[\"referer\"] raw_bytes = context.get_bytes(url, headers=headers) response = json.loads(raw_bytes) chapters = {}",
"or implied. # See the License for the specific language governing permissions and",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"CONDITIONS OF ANY KIND, either express or implied. # See the License for",
"Context, Extractor logger = structlog.get_logger(__name__) _DEFAULT_HEADERS = { \"user-agent\": \"User-Agent: Mozilla/5.0 (X11; Linux",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"list( sorted(chapters.values(), key=lambda chapter: chapter.chapter_id) ) return Comic( publisher=self.publisher, comic_id=comic_id, name=response[\"body\"][\"items\"][0][\"user\"][\"name\"] + \"’s",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"publisher(self): return \"Fanbox\" def extract(self, context: Context, comic_id: str) -> Comic: url =",
"= \"{:012}\".format(int(chapter[\"id\"])) chapter_title = chapter[\"title\"] chapter_url = f\"https://{comic_id}.fanbox.cc/posts/{chapter['id']}\" # Deduplicate by URL chapters[chapter_url]",
"json import structlog from rssfly.extractor.common import Chapter, Comic, Context, Extractor logger = structlog.get_logger(__name__)",
"with the License. # You may obtain a copy of the License at",
"structlog from rssfly.extractor.common import Chapter, Comic, Context, Extractor logger = structlog.get_logger(__name__) _DEFAULT_HEADERS =",
"Chapter( chapter_id=chapter_id, name=chapter_title, url=chapter_url, ) chapter_list = list( sorted(chapters.values(), key=lambda chapter: chapter.chapter_id) )",
"Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the",
"in writing, software # distributed under the License is distributed on an \"AS",
"chapter_list = list( sorted(chapters.values(), key=lambda chapter: chapter.chapter_id) ) return Comic( publisher=self.publisher, comic_id=comic_id, name=response[\"body\"][\"items\"][0][\"user\"][\"name\"]",
"chapter: chapter.chapter_id) ) return Comic( publisher=self.publisher, comic_id=comic_id, name=response[\"body\"][\"items\"][0][\"user\"][\"name\"] + \"’s Fanbox\", url=f\"https://{comic_id}.fanbox.cc\", chapters=chapter_list,",
"= json.loads(raw_bytes) chapters = {} for chapter in response[\"body\"][\"items\"]: chapter_id = \"{:012}\".format(int(chapter[\"id\"])) chapter_title",
"extract(self, context: Context, comic_id: str) -> Comic: url = f\"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10\" logger.info(\"Fetching from Fanbox",
"under the Apache License, Version 2.0 (the \"License\"); # you may not use"
] |
[
"form_model\", form_code=\"cli002\", fields=[self.rp_field, self.eid_field, self.symptoms_field, self.blood_type_field]) def form_model_with_gps_question(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\",",
"sw=4 encoding=utf-8 from mock import Mock from mangrove.form_model.field import TextField, SelectField, DateField, GeoCodeField,",
"SelectField, DateField, GeoCodeField, UniqueIdField from mangrove.form_model.form_model import FormModel class FormModelGenerator(object): def __init__(self, database_manager):",
"be a date in the following format: day.month.year. Example: 25.12.2011\") self.symptoms_field = SelectField(label=\"Zhat",
"date\", code=\"RD\", name=\"What is réporting date?\", date_format=\"dd.mm.yyyy\", instruction=\"Answer must be a date in",
"group?\", code=\"BG\", name=\"What is your blood group?\", options=[(\"O+\", \"a\"), (\"O-\", \"b\"), (\"AB\", \"c\"),",
"name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field], ) def summary_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\",",
"self.blood_type_field]) def summary_form_model_with_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.rp_field, self.eid_field, self.symptoms_field, self.blood_type_field])",
"GeoCodeField, UniqueIdField from mangrove.form_model.form_model import FormModel class FormModelGenerator(object): def __init__(self, database_manager): self.database_manager =",
"fields=[self.rp_field, self.eid_field, self.symptoms_field, self.blood_type_field]) def form_model_with_gps_question(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field,",
"__init__(self, database_manager): self.database_manager = database_manager self.init_form_model_fields() def form_model(self, form_code=\"cli002\"): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids",
"Example: 25.12.2011\") self.symptoms_field = SelectField(label=\"Zhat are symptoms?\", code=\"SY\", name=\"Zhat are symptoms?\", options=[(\"Rapid weight",
"weight loss\", \"a\"), (\"Dry cough\", \"2b\"), (\"Pneumonia\", \"c\"), (\"Memory loss\", \"d\"), (\"Neurological disorders",
"(\"Pneumonia\", \"c\"), (\"Memory loss\", \"d\"), (\"Neurological disorders \", \"e\")], single_select_flag=False) self.blood_type_field = SelectField(label=\"What",
"code=\"BG\", name=\"What is your blood group?\", options=[(\"O+\", \"a\"), (\"O-\", \"b\"), (\"AB\", \"c\"), (\"B+\",",
"following format: day.month.year. Example: 25.12.2011\") self.symptoms_field = SelectField(label=\"Zhat are symptoms?\", code=\"SY\", name=\"Zhat are",
"self.rp_field = DateField(label=\"Report date\", code=\"RD\", name=\"What is réporting date?\", date_format=\"dd.mm.yyyy\", instruction=\"Answer must be",
"loss\", \"a\"), (\"Dry cough\", \"2b\"), (\"Pneumonia\", \"c\"), (\"Memory loss\", \"d\"), (\"Neurological disorders \",",
"entity?\", code=\"EID\", name=\"What is associatéd entity?\") self.rp_field = DateField(label=\"Report date\", code=\"RD\", name=\"What is",
"et sw=4 encoding=utf-8 from mock import Mock from mangrove.form_model.field import TextField, SelectField, DateField,",
"import TextField, SelectField, DateField, GeoCodeField, UniqueIdField from mangrove.form_model.form_model import FormModel class FormModelGenerator(object): def",
"\"d\"), (\"Neurological disorders \", \"e\")], single_select_flag=False) self.blood_type_field = SelectField(label=\"What is your blood group?\",",
"form_model_with_gps_question(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.gps_field], ) def init_form_model_fields(self): self.eid_field",
"form_model\", form_code=form_code, fields=[self.eid_field, self.rp_field, self.symptoms_field, self.blood_type_field], ) def subject_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids",
"(\"Neurological disorders \", \"e\")], single_select_flag=False) self.blood_type_field = SelectField(label=\"What is your blood group?\", code=\"BG\",",
"form_code=\"cli002\", fields=[self.eid_field, self.gps_field], ) def init_form_model_fields(self): self.eid_field = UniqueIdField(unique_id_type='clinic',label=\"What is associated entity?\", code=\"EID\",",
"form_model(self, form_code=\"cli002\"): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=form_code, fields=[self.eid_field, self.rp_field, self.symptoms_field, self.blood_type_field], )",
"self.eid_field, self.symptoms_field, self.blood_type_field]) def form_model_with_gps_question(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.gps_field],",
"disorders \", \"e\")], single_select_flag=False) self.blood_type_field = SelectField(label=\"What is your blood group?\", code=\"BG\", name=\"What",
"loss\", \"d\"), (\"Neurological disorders \", \"e\")], single_select_flag=False) self.blood_type_field = SelectField(label=\"What is your blood",
"form_code=form_code, fields=[self.eid_field, self.rp_field, self.symptoms_field, self.blood_type_field], ) def subject_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\",",
"self.gps_field], ) def init_form_model_fields(self): self.eid_field = UniqueIdField(unique_id_type='clinic',label=\"What is associated entity?\", code=\"EID\", name=\"What is",
"single_select_flag=False) self.blood_type_field = SelectField(label=\"What is your blood group?\", code=\"BG\", name=\"What is your blood",
"encoding=utf-8 from mock import Mock from mangrove.form_model.field import TextField, SelectField, DateField, GeoCodeField, UniqueIdField",
"= SelectField(label=\"What is your blood group?\", code=\"BG\", name=\"What is your blood group?\", options=[(\"O+\",",
"date_format=\"dd.mm.yyyy\", instruction=\"Answer must be a date in the following format: day.month.year. Example: 25.12.2011\")",
"def __init__(self, database_manager): self.database_manager = database_manager self.init_form_model_fields() def form_model(self, form_code=\"cli002\"): return FormModel(self.database_manager, name=\"AIDS\",",
"\"a\"), (\"Dry cough\", \"2b\"), (\"Pneumonia\", \"c\"), (\"Memory loss\", \"d\"), (\"Neurological disorders \", \"e\")],",
"form_code=\"cli002\", fields=[self.rp_field, self.eid_field, self.symptoms_field, self.blood_type_field]) def form_model_with_gps_question(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\",",
"label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.gps_field], ) def init_form_model_fields(self): self.eid_field = UniqueIdField(unique_id_type='clinic',label=\"What is associated",
"(\"Dry cough\", \"2b\"), (\"Pneumonia\", \"c\"), (\"Memory loss\", \"d\"), (\"Neurological disorders \", \"e\")], single_select_flag=False)",
"cough\", \"2b\"), (\"Pneumonia\", \"c\"), (\"Memory loss\", \"d\"), (\"Neurological disorders \", \"e\")], single_select_flag=False) self.blood_type_field",
"name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.rp_field, self.eid_field, self.symptoms_field, self.blood_type_field]) def form_model_with_gps_question(self): return FormModel(self.database_manager, name=\"AIDS\",",
"associatéd entity?\") self.rp_field = DateField(label=\"Report date\", code=\"RD\", name=\"What is réporting date?\", date_format=\"dd.mm.yyyy\", instruction=\"Answer",
"code=\"RD\", name=\"What is réporting date?\", date_format=\"dd.mm.yyyy\", instruction=\"Answer must be a date in the",
"<filename>datawinners/project/tests/form_model_generator.py<gh_stars>1-10 # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 from mock import Mock",
"return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field], ) def summary_form_model_without_rp(self): return",
"name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.gps_field], ) def init_form_model_fields(self): self.eid_field = UniqueIdField(unique_id_type='clinic',label=\"What is",
"FormModelGenerator(object): def __init__(self, database_manager): self.database_manager = database_manager self.init_form_model_fields() def form_model(self, form_code=\"cli002\"): return FormModel(self.database_manager,",
"fields=[self.eid_field, self.symptoms_field, self.blood_type_field]) def summary_form_model_with_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.rp_field, self.eid_field,",
"self.symptoms_field, self.blood_type_field], ) def subject_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field,",
"ai ts=4 sts=4 et sw=4 encoding=utf-8 from mock import Mock from mangrove.form_model.field import",
"SelectField(label=\"What is your blood group?\", code=\"BG\", name=\"What is your blood group?\", options=[(\"O+\", \"a\"),",
") def subject_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field], )",
"associated entity?\", code=\"EID\", name=\"What is associatéd entity?\") self.rp_field = DateField(label=\"Report date\", code=\"RD\", name=\"What",
"mock import Mock from mangrove.form_model.field import TextField, SelectField, DateField, GeoCodeField, UniqueIdField from mangrove.form_model.form_model",
"label=\"Aids form_model\", form_code=\"cli002\", fields=[self.rp_field, self.eid_field, self.symptoms_field, self.blood_type_field]) def form_model_with_gps_question(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids",
"your blood group?\", code=\"BG\", name=\"What is your blood group?\", options=[(\"O+\", \"a\"), (\"O-\", \"b\"),",
"self.blood_type_field = SelectField(label=\"What is your blood group?\", code=\"BG\", name=\"What is your blood group?\",",
"name=\"What is réporting date?\", date_format=\"dd.mm.yyyy\", instruction=\"Answer must be a date in the following",
"name=\"What is associatéd entity?\") self.rp_field = DateField(label=\"Report date\", code=\"RD\", name=\"What is réporting date?\",",
"SelectField(label=\"Zhat are symptoms?\", code=\"SY\", name=\"Zhat are symptoms?\", options=[(\"Rapid weight loss\", \"a\"), (\"Dry cough\",",
"name=\"AIDS\", label=\"Aids form_model\", form_code=form_code, fields=[self.eid_field, self.rp_field, self.symptoms_field, self.blood_type_field], ) def subject_form_model_without_rp(self): return FormModel(self.database_manager,",
"def init_form_model_fields(self): self.eid_field = UniqueIdField(unique_id_type='clinic',label=\"What is associated entity?\", code=\"EID\", name=\"What is associatéd entity?\")",
"(\"Memory loss\", \"d\"), (\"Neurological disorders \", \"e\")], single_select_flag=False) self.blood_type_field = SelectField(label=\"What is your",
"(\"AB\", \"c\"), (\"B+\", \"d\")], single_select_flag=True) self.gps_field = GeoCodeField(name=\"field1_Loc\", code=\"gps\", label=\"Where do you stay?\")",
"blood group?\", code=\"BG\", name=\"What is your blood group?\", options=[(\"O+\", \"a\"), (\"O-\", \"b\"), (\"AB\",",
"your blood group?\", options=[(\"O+\", \"a\"), (\"O-\", \"b\"), (\"AB\", \"c\"), (\"B+\", \"d\")], single_select_flag=True) self.gps_field",
"\"e\")], single_select_flag=False) self.blood_type_field = SelectField(label=\"What is your blood group?\", code=\"BG\", name=\"What is your",
"return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.rp_field, self.eid_field, self.symptoms_field, self.blood_type_field]) def form_model_with_gps_question(self): return",
"DateField, GeoCodeField, UniqueIdField from mangrove.form_model.form_model import FormModel class FormModelGenerator(object): def __init__(self, database_manager): self.database_manager",
"ts=4 sts=4 et sw=4 encoding=utf-8 from mock import Mock from mangrove.form_model.field import TextField,",
"form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field]) def summary_form_model_with_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.rp_field,",
"init_form_model_fields(self): self.eid_field = UniqueIdField(unique_id_type='clinic',label=\"What is associated entity?\", code=\"EID\", name=\"What is associatéd entity?\") self.rp_field",
"symptoms?\", options=[(\"Rapid weight loss\", \"a\"), (\"Dry cough\", \"2b\"), (\"Pneumonia\", \"c\"), (\"Memory loss\", \"d\"),",
"blood group?\", options=[(\"O+\", \"a\"), (\"O-\", \"b\"), (\"AB\", \"c\"), (\"B+\", \"d\")], single_select_flag=True) self.gps_field =",
"date in the following format: day.month.year. Example: 25.12.2011\") self.symptoms_field = SelectField(label=\"Zhat are symptoms?\",",
"the following format: day.month.year. Example: 25.12.2011\") self.symptoms_field = SelectField(label=\"Zhat are symptoms?\", code=\"SY\", name=\"Zhat",
"FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field]) def summary_form_model_with_rp(self): return FormModel(self.database_manager, name=\"AIDS\",",
"is associated entity?\", code=\"EID\", name=\"What is associatéd entity?\") self.rp_field = DateField(label=\"Report date\", code=\"RD\",",
"summary_form_model_with_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.rp_field, self.eid_field, self.symptoms_field, self.blood_type_field]) def form_model_with_gps_question(self):",
"group?\", options=[(\"O+\", \"a\"), (\"O-\", \"b\"), (\"AB\", \"c\"), (\"B+\", \"d\")], single_select_flag=True) self.gps_field = GeoCodeField(name=\"field1_Loc\",",
"import FormModel class FormModelGenerator(object): def __init__(self, database_manager): self.database_manager = database_manager self.init_form_model_fields() def form_model(self,",
"from mangrove.form_model.field import TextField, SelectField, DateField, GeoCodeField, UniqueIdField from mangrove.form_model.form_model import FormModel class",
"FormModel class FormModelGenerator(object): def __init__(self, database_manager): self.database_manager = database_manager self.init_form_model_fields() def form_model(self, form_code=\"cli002\"):",
"def subject_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field], ) def",
"are symptoms?\", options=[(\"Rapid weight loss\", \"a\"), (\"Dry cough\", \"2b\"), (\"Pneumonia\", \"c\"), (\"Memory loss\",",
"fields=[self.eid_field, self.gps_field], ) def init_form_model_fields(self): self.eid_field = UniqueIdField(unique_id_type='clinic',label=\"What is associated entity?\", code=\"EID\", name=\"What",
"a date in the following format: day.month.year. Example: 25.12.2011\") self.symptoms_field = SelectField(label=\"Zhat are",
"def summary_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field]) def summary_form_model_with_rp(self):",
"\"c\"), (\"Memory loss\", \"d\"), (\"Neurological disorders \", \"e\")], single_select_flag=False) self.blood_type_field = SelectField(label=\"What is",
"self.symptoms_field = SelectField(label=\"Zhat are symptoms?\", code=\"SY\", name=\"Zhat are symptoms?\", options=[(\"Rapid weight loss\", \"a\"),",
"vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 from mock import Mock from mangrove.form_model.field",
"import Mock from mangrove.form_model.field import TextField, SelectField, DateField, GeoCodeField, UniqueIdField from mangrove.form_model.form_model import",
"fields=[self.eid_field, self.symptoms_field, self.blood_type_field], ) def summary_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field,",
"symptoms?\", code=\"SY\", name=\"Zhat are symptoms?\", options=[(\"Rapid weight loss\", \"a\"), (\"Dry cough\", \"2b\"), (\"Pneumonia\",",
"sts=4 et sw=4 encoding=utf-8 from mock import Mock from mangrove.form_model.field import TextField, SelectField,",
"summary_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field]) def summary_form_model_with_rp(self): return",
"format: day.month.year. Example: 25.12.2011\") self.symptoms_field = SelectField(label=\"Zhat are symptoms?\", code=\"SY\", name=\"Zhat are symptoms?\",",
"entity?\") self.rp_field = DateField(label=\"Report date\", code=\"RD\", name=\"What is réporting date?\", date_format=\"dd.mm.yyyy\", instruction=\"Answer must",
"class FormModelGenerator(object): def __init__(self, database_manager): self.database_manager = database_manager self.init_form_model_fields() def form_model(self, form_code=\"cli002\"): return",
"FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.rp_field, self.eid_field, self.symptoms_field, self.blood_type_field]) def form_model_with_gps_question(self): return FormModel(self.database_manager,",
") def summary_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field]) def",
"= database_manager self.init_form_model_fields() def form_model(self, form_code=\"cli002\"): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=form_code, fields=[self.eid_field,",
"= UniqueIdField(unique_id_type='clinic',label=\"What is associated entity?\", code=\"EID\", name=\"What is associatéd entity?\") self.rp_field = DateField(label=\"Report",
"def form_model_with_gps_question(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.gps_field], ) def init_form_model_fields(self):",
"\"2b\"), (\"Pneumonia\", \"c\"), (\"Memory loss\", \"d\"), (\"Neurological disorders \", \"e\")], single_select_flag=False) self.blood_type_field =",
"UniqueIdField from mangrove.form_model.form_model import FormModel class FormModelGenerator(object): def __init__(self, database_manager): self.database_manager = database_manager",
"form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field]) def summary_form_model_with_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\",",
"self.eid_field = UniqueIdField(unique_id_type='clinic',label=\"What is associated entity?\", code=\"EID\", name=\"What is associatéd entity?\") self.rp_field =",
"réporting date?\", date_format=\"dd.mm.yyyy\", instruction=\"Answer must be a date in the following format: day.month.year.",
"\", \"e\")], single_select_flag=False) self.blood_type_field = SelectField(label=\"What is your blood group?\", code=\"BG\", name=\"What is",
"return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=form_code, fields=[self.eid_field, self.rp_field, self.symptoms_field, self.blood_type_field], ) def subject_form_model_without_rp(self):",
"FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field], ) def summary_form_model_without_rp(self): return FormModel(self.database_manager,",
"name=\"Zhat are symptoms?\", options=[(\"Rapid weight loss\", \"a\"), (\"Dry cough\", \"2b\"), (\"Pneumonia\", \"c\"), (\"Memory",
"options=[(\"O+\", \"a\"), (\"O-\", \"b\"), (\"AB\", \"c\"), (\"B+\", \"d\")], single_select_flag=True) self.gps_field = GeoCodeField(name=\"field1_Loc\", code=\"gps\",",
"Mock from mangrove.form_model.field import TextField, SelectField, DateField, GeoCodeField, UniqueIdField from mangrove.form_model.form_model import FormModel",
"TextField, SelectField, DateField, GeoCodeField, UniqueIdField from mangrove.form_model.form_model import FormModel class FormModelGenerator(object): def __init__(self,",
"def summary_form_model_with_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.rp_field, self.eid_field, self.symptoms_field, self.blood_type_field]) def",
"day.month.year. Example: 25.12.2011\") self.symptoms_field = SelectField(label=\"Zhat are symptoms?\", code=\"SY\", name=\"Zhat are symptoms?\", options=[(\"Rapid",
"form_code=\"cli002\"): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=form_code, fields=[self.eid_field, self.rp_field, self.symptoms_field, self.blood_type_field], ) def",
"from mangrove.form_model.form_model import FormModel class FormModelGenerator(object): def __init__(self, database_manager): self.database_manager = database_manager self.init_form_model_fields()",
"form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field], ) def summary_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\",",
"def form_model(self, form_code=\"cli002\"): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=form_code, fields=[self.eid_field, self.rp_field, self.symptoms_field, self.blood_type_field],",
"instruction=\"Answer must be a date in the following format: day.month.year. Example: 25.12.2011\") self.symptoms_field",
"self.symptoms_field, self.blood_type_field], ) def summary_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field,",
"\"b\"), (\"AB\", \"c\"), (\"B+\", \"d\")], single_select_flag=True) self.gps_field = GeoCodeField(name=\"field1_Loc\", code=\"gps\", label=\"Where do you",
"fields=[self.eid_field, self.rp_field, self.symptoms_field, self.blood_type_field], ) def subject_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\",",
"self.blood_type_field]) def form_model_with_gps_question(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.gps_field], ) def",
"label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field], ) def summary_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids",
"database_manager): self.database_manager = database_manager self.init_form_model_fields() def form_model(self, form_code=\"cli002\"): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\",",
"25.12.2011\") self.symptoms_field = SelectField(label=\"Zhat are symptoms?\", code=\"SY\", name=\"Zhat are symptoms?\", options=[(\"Rapid weight loss\",",
"UniqueIdField(unique_id_type='clinic',label=\"What is associated entity?\", code=\"EID\", name=\"What is associatéd entity?\") self.rp_field = DateField(label=\"Report date\",",
"self.blood_type_field], ) def subject_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field],",
"return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.gps_field], ) def init_form_model_fields(self): self.eid_field =",
"mangrove.form_model.field import TextField, SelectField, DateField, GeoCodeField, UniqueIdField from mangrove.form_model.form_model import FormModel class FormModelGenerator(object):",
"return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field]) def summary_form_model_with_rp(self): return FormModel(self.database_manager,",
"in the following format: day.month.year. Example: 25.12.2011\") self.symptoms_field = SelectField(label=\"Zhat are symptoms?\", code=\"SY\",",
"FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.gps_field], ) def init_form_model_fields(self): self.eid_field = UniqueIdField(unique_id_type='clinic',label=\"What",
"form_model\", form_code=\"cli002\", fields=[self.eid_field, self.gps_field], ) def init_form_model_fields(self): self.eid_field = UniqueIdField(unique_id_type='clinic',label=\"What is associated entity?\",",
"date?\", date_format=\"dd.mm.yyyy\", instruction=\"Answer must be a date in the following format: day.month.year. Example:",
"self.init_form_model_fields() def form_model(self, form_code=\"cli002\"): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=form_code, fields=[self.eid_field, self.rp_field, self.symptoms_field,",
"DateField(label=\"Report date\", code=\"RD\", name=\"What is réporting date?\", date_format=\"dd.mm.yyyy\", instruction=\"Answer must be a date",
"self.symptoms_field, self.blood_type_field]) def summary_form_model_with_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.rp_field, self.eid_field, self.symptoms_field,",
"code=\"EID\", name=\"What is associatéd entity?\") self.rp_field = DateField(label=\"Report date\", code=\"RD\", name=\"What is réporting",
"= SelectField(label=\"Zhat are symptoms?\", code=\"SY\", name=\"Zhat are symptoms?\", options=[(\"Rapid weight loss\", \"a\"), (\"Dry",
"name=\"What is your blood group?\", options=[(\"O+\", \"a\"), (\"O-\", \"b\"), (\"AB\", \"c\"), (\"B+\", \"d\")],",
"database_manager self.init_form_model_fields() def form_model(self, form_code=\"cli002\"): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=form_code, fields=[self.eid_field, self.rp_field,",
"(\"O-\", \"b\"), (\"AB\", \"c\"), (\"B+\", \"d\")], single_select_flag=True) self.gps_field = GeoCodeField(name=\"field1_Loc\", code=\"gps\", label=\"Where do",
"are symptoms?\", code=\"SY\", name=\"Zhat are symptoms?\", options=[(\"Rapid weight loss\", \"a\"), (\"Dry cough\", \"2b\"),",
"is your blood group?\", code=\"BG\", name=\"What is your blood group?\", options=[(\"O+\", \"a\"), (\"O-\",",
"self.database_manager = database_manager self.init_form_model_fields() def form_model(self, form_code=\"cli002\"): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=form_code,",
"is réporting date?\", date_format=\"dd.mm.yyyy\", instruction=\"Answer must be a date in the following format:",
"options=[(\"Rapid weight loss\", \"a\"), (\"Dry cough\", \"2b\"), (\"Pneumonia\", \"c\"), (\"Memory loss\", \"d\"), (\"Neurological",
"\"a\"), (\"O-\", \"b\"), (\"AB\", \"c\"), (\"B+\", \"d\")], single_select_flag=True) self.gps_field = GeoCodeField(name=\"field1_Loc\", code=\"gps\", label=\"Where",
"label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field]) def summary_form_model_with_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\",",
"from mock import Mock from mangrove.form_model.field import TextField, SelectField, DateField, GeoCodeField, UniqueIdField from",
"mangrove.form_model.form_model import FormModel class FormModelGenerator(object): def __init__(self, database_manager): self.database_manager = database_manager self.init_form_model_fields() def",
"subject_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field], ) def summary_form_model_without_rp(self):",
"name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field]) def summary_form_model_with_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids",
"form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field], ) def summary_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\",",
"is associatéd entity?\") self.rp_field = DateField(label=\"Report date\", code=\"RD\", name=\"What is réporting date?\", date_format=\"dd.mm.yyyy\",",
"self.symptoms_field, self.blood_type_field]) def form_model_with_gps_question(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.gps_field], )",
"is your blood group?\", options=[(\"O+\", \"a\"), (\"O-\", \"b\"), (\"AB\", \"c\"), (\"B+\", \"d\")], single_select_flag=True)",
"self.blood_type_field], ) def summary_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field, self.symptoms_field, self.blood_type_field])",
") def init_form_model_fields(self): self.eid_field = UniqueIdField(unique_id_type='clinic',label=\"What is associated entity?\", code=\"EID\", name=\"What is associatéd",
"# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 from mock import Mock from",
"FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=form_code, fields=[self.eid_field, self.rp_field, self.symptoms_field, self.blood_type_field], ) def subject_form_model_without_rp(self): return",
"= DateField(label=\"Report date\", code=\"RD\", name=\"What is réporting date?\", date_format=\"dd.mm.yyyy\", instruction=\"Answer must be a",
"label=\"Aids form_model\", form_code=form_code, fields=[self.eid_field, self.rp_field, self.symptoms_field, self.blood_type_field], ) def subject_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\",",
"self.rp_field, self.symptoms_field, self.blood_type_field], ) def subject_form_model_without_rp(self): return FormModel(self.database_manager, name=\"AIDS\", label=\"Aids form_model\", form_code=\"cli002\", fields=[self.eid_field,",
"must be a date in the following format: day.month.year. Example: 25.12.2011\") self.symptoms_field =",
"code=\"SY\", name=\"Zhat are symptoms?\", options=[(\"Rapid weight loss\", \"a\"), (\"Dry cough\", \"2b\"), (\"Pneumonia\", \"c\"),"
] |
[
"ind.strip('.0') tx = self._snmp_connection.bulk_walk(tx_oid) for i in tx: tx_dbm = float(i.value) / 10",
"= self._snmp_connection.bulk_walk(rx_oid) for i in rx: rx_dbm = float(i.value) / 10 rx_power_actual[name] =",
"rx_power_actual[name] = rx_dbm return rx_power_actual @threaded_cached_property def _xcvr_tx_power_levels(self): tx_power_actual = {} for ind,",
"\"metric_type\": \"gauge\", \"value\": \"rx_light_level.$index\" }, \"tx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"tx_light_level.$index\" } }",
"None super(CienaPluginWSLightMetricsEnrichment, self).__init__() @property def metrics_enrichment_class(self): return CienaWSLightMetricsEnrichment @threaded_cached_property def _xcvr_interfaces_id(self): varbinds_int_type =",
"= MIB_CIENA_CHASSIS + '.3.4.8.6.1.2' cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.11.1.2' cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS +",
"None self._ciena_model = None self._snmp_connection = None self._max_repetitions = None self._polling_execute_frequency = None",
"rx_dbm return rx_power_actual @threaded_cached_property def _xcvr_tx_power_levels(self): tx_power_actual = {} for ind, name in",
"\"xcvr_interfaces\": { \"method\": \"static\", \"values\": self._xcvr_interfaces_id }, \"rx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrRxPowerActual,",
"\"gauge\", \"value\": \"xcvr_interfaces.$index\" }, \"rx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"rx_light_level.$index\" }, \"tx_light_level\": {",
"}, \"rx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrRxPowerActual, \"values\": self._xcvr_rx_power_levels }, \"tx_light_level\": { \"method\":",
"\"bulk_walk\", \"oid\": cwsXcvrTxPowerActual, \"values\": self._xcvr_tx_power_levels } } def _build_metrics_groups_conf(self): self._metrics_groups = [ {",
"tx_dbm return tx_power_actual def _build_metrics_oids_map(self): self._oids_map = { \"xcvr_interfaces\": { \"method\": \"static\", \"values\":",
"Ciena Waveserver devices for transceiver light level Metrics \"\"\" from cached_property import threaded_cached_property",
"{}, \"metrics\": { \"xcvr_interfaces\": { \"metric_type\": \"gauge\", \"value\": \"xcvr_interfaces.$index\" }, \"rx_light_level\": { \"metric_type\":",
"threaded_cached_property from yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\ import",
"\"tx_light_level.$index\" } } } ] def get_enrichment(self): self._ciena_model = self._plugin_context.data.resource_metadata.get('model', 'unknown') self._build_metrics_oids_map() self._build_metrics_groups_conf()",
"cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS + '.3.4.8.6.1.2' cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.11.1.2' cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS",
"in self._xcvr_interfaces_id.items(): rx_oid = cwsXcvrRxPowerActual + '.' + ind.strip('.0') rx = self._snmp_connection.bulk_walk(rx_oid) for",
"from yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\ import PanoptesEnrichmentGenericSNMPPlugin",
"self._xcvr_interfaces_id.items(): rx_oid = cwsXcvrRxPowerActual + '.' + ind.strip('.0') rx = self._snmp_connection.bulk_walk(rx_oid) for i",
"__init__(self): self._plugin_context = None self._logger = None self._ciena_model = None self._snmp_connection = None",
"super(CienaPluginWSLightMetricsEnrichment, self).__init__() @property def metrics_enrichment_class(self): return CienaWSLightMetricsEnrichment @threaded_cached_property def _xcvr_interfaces_id(self): varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType)",
"'4': xcvr_index.append(varbind.index) for varbind_int in varbinds_interface: if varbind_int.index in xcvr_index: interface_id[varbind_int.index] = varbind_int.value",
"rx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items(): rx_oid = cwsXcvrRxPowerActual + '.'",
"10 rx_power_actual[name] = rx_dbm return rx_power_actual @threaded_cached_property def _xcvr_tx_power_levels(self): tx_power_actual = {} for",
"tx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items(): tx_oid = cwsXcvrTxPowerActual + '.'",
"= tx_dbm return tx_power_actual def _build_metrics_oids_map(self): self._oids_map = { \"xcvr_interfaces\": { \"method\": \"static\",",
"\"oid\": cwsXcvrRxPowerActual, \"values\": self._xcvr_rx_power_levels }, \"tx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrTxPowerActual, \"values\": self._xcvr_tx_power_levels",
"import threaded_cached_property from yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\",
"{} to enrichment group for the device {}: {}'. format(enrichment_set, self.device_fqdn, repr(e))) self.enrichment_group_set.add_enrichment_group(self.enrichment_group)",
"{}: {}'. format(enrichment_set, self.device_fqdn, repr(e))) self.enrichment_group_set.add_enrichment_group(self.enrichment_group) self._logger.debug('Metrics enrichment for device {}: {}'.format(self.device_fqdn, self.enrichment_group_set))",
"in tx: tx_dbm = float(i.value) / 10 tx_power_actual[name] = tx_dbm return tx_power_actual def",
"varbind in varbinds_int_type: if varbind.value == '4': xcvr_index.append(varbind.index) for varbind_int in varbinds_interface: if",
"for transceiver light level Metrics \"\"\" from cached_property import threaded_cached_property from yahoo_panoptes.enrichment.schema.generic.snmp import",
"= MIB_CIENA_CHASSIS + '.3.4.8.11.1.2' cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.13.1.2' class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass class",
"self._ciena_model = None self._snmp_connection = None self._max_repetitions = None self._polling_execute_frequency = None super(CienaPluginWSLightMetricsEnrichment,",
"None self._logger = None self._ciena_model = None self._snmp_connection = None self._max_repetitions = None",
"= None self._max_repetitions = None self._polling_execute_frequency = None super(CienaPluginWSLightMetricsEnrichment, self).__init__() @property def metrics_enrichment_class(self):",
"for varbind in varbinds_int_type: if varbind.value == '4': xcvr_index.append(varbind.index) for varbind_int in varbinds_interface:",
"= None self._snmp_connection = None self._max_repetitions = None self._polling_execute_frequency = None super(CienaPluginWSLightMetricsEnrichment, self).__init__()",
"in xcvr_index: interface_id[varbind_int.index] = varbind_int.value return interface_id @threaded_cached_property def _xcvr_rx_power_levels(self): rx_power_actual = {}",
"def _xcvr_tx_power_levels(self): tx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items(): tx_oid = cwsXcvrTxPowerActual",
"cwsXcvrTxPowerActual + '.' + ind.strip('.0') tx = self._snmp_connection.bulk_walk(tx_oid) for i in tx: tx_dbm",
"metrics_enrichment_class(self): return CienaWSLightMetricsEnrichment @threaded_cached_property def _xcvr_interfaces_id(self): varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName) interface_id",
"varbind_int.value return interface_id @threaded_cached_property def _xcvr_rx_power_levels(self): rx_power_actual = {} for ind, name in",
"@threaded_cached_property def _xcvr_interfaces_id(self): varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName) interface_id = {} xcvr_index",
"{} for ind, name in self._xcvr_interfaces_id.items(): rx_oid = cwsXcvrRxPowerActual + '.' + ind.strip('.0')",
"self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName) interface_id = {} xcvr_index = [] for varbind in",
"= { \"xcvr_interfaces\": { \"method\": \"static\", \"values\": self._xcvr_interfaces_id }, \"rx_light_level\": { \"method\": \"bulk_walk\",",
"_xcvr_tx_power_levels(self): tx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items(): tx_oid = cwsXcvrTxPowerActual +",
"_build_metrics_oids_map(self): self._oids_map = { \"xcvr_interfaces\": { \"method\": \"static\", \"values\": self._xcvr_interfaces_id }, \"rx_light_level\": {",
"\"values\": self._xcvr_tx_power_levels } } def _build_metrics_groups_conf(self): self._metrics_groups = [ { \"group_name\": \"light_levels\", \"dimensions\":",
"\"gauge\", \"value\": \"rx_light_level.$index\" }, \"tx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"tx_light_level.$index\" } } }",
"_xcvr_rx_power_levels(self): rx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items(): rx_oid = cwsXcvrRxPowerActual +",
"self._ciena_model = self._plugin_context.data.resource_metadata.get('model', 'unknown') self._build_metrics_oids_map() self._build_metrics_groups_conf() enrichment_set = { \"oids\": self.oids_map, \"metrics_groups\": self.metrics_groups",
"xcvr_index = [] for varbind in varbinds_int_type: if varbind.value == '4': xcvr_index.append(varbind.index) for",
"_build_metrics_groups_conf(self): self._metrics_groups = [ { \"group_name\": \"light_levels\", \"dimensions\": {}, \"metrics\": { \"xcvr_interfaces\": {",
"def _build_metrics_groups_conf(self): self._metrics_groups = [ { \"group_name\": \"light_levels\", \"dimensions\": {}, \"metrics\": { \"xcvr_interfaces\":",
"+ '.' + ind.strip('.0') rx = self._snmp_connection.bulk_walk(rx_oid) for i in rx: rx_dbm =",
"{ \"method\": \"static\", \"values\": self._xcvr_interfaces_id }, \"rx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrRxPowerActual, \"values\":",
"Plugin that can poll Ciena Waveserver devices for transceiver light level Metrics \"\"\"",
"\"metric_type\": \"gauge\", \"value\": \"xcvr_interfaces.$index\" }, \"rx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"rx_light_level.$index\" }, \"tx_light_level\":",
"{ \"metric_type\": \"gauge\", \"value\": \"tx_light_level.$index\" } } } ] def get_enrichment(self): self._ciena_model =",
"tx_power_actual[name] = tx_dbm return tx_power_actual def _build_metrics_oids_map(self): self._oids_map = { \"xcvr_interfaces\": { \"method\":",
"/ 10 tx_power_actual[name] = tx_dbm return tx_power_actual def _build_metrics_oids_map(self): self._oids_map = { \"xcvr_interfaces\":",
"self._metrics_groups = [ { \"group_name\": \"light_levels\", \"dimensions\": {}, \"metrics\": { \"xcvr_interfaces\": { \"metric_type\":",
"self._xcvr_interfaces_id }, \"rx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrRxPowerActual, \"values\": self._xcvr_rx_power_levels }, \"tx_light_level\": {",
"in rx: rx_dbm = float(i.value) / 10 rx_power_actual[name] = rx_dbm return rx_power_actual @threaded_cached_property",
"\"bulk_walk\", \"oid\": cwsXcvrRxPowerActual, \"values\": self._xcvr_rx_power_levels }, \"tx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrTxPowerActual, \"values\":",
"} } def _build_metrics_groups_conf(self): self._metrics_groups = [ { \"group_name\": \"light_levels\", \"dimensions\": {}, \"metrics\":",
"= self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName) interface_id = {} xcvr_index = [] for varbind",
"@property def metrics_enrichment_class(self): return CienaWSLightMetricsEnrichment @threaded_cached_property def _xcvr_interfaces_id(self): varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface =",
"\"values\": self._xcvr_interfaces_id }, \"rx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrRxPowerActual, \"values\": self._xcvr_rx_power_levels }, \"tx_light_level\":",
"cached_property import threaded_cached_property from yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp",
"[] for varbind in varbinds_int_type: if varbind.value == '4': xcvr_index.append(varbind.index) for varbind_int in",
"i in tx: tx_dbm = float(i.value) / 10 tx_power_actual[name] = tx_dbm return tx_power_actual",
"import PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271' cwsPortIdName = MIB_CIENA_CHASSIS + '.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS",
"\"oids\": self.oids_map, \"metrics_groups\": self.metrics_groups } try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set)) except Exception as e: self._logger.error('Error",
"yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\ import PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271' cwsPortIdName =",
"\"\"\" from cached_property import threaded_cached_property from yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet",
"+ ind.strip('.0') rx = self._snmp_connection.bulk_walk(rx_oid) for i in rx: rx_dbm = float(i.value) /",
"yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\ import PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271' cwsPortIdName = MIB_CIENA_CHASSIS + '.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType",
"{ \"oids\": self.oids_map, \"metrics_groups\": self.metrics_groups } try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set)) except Exception as e:",
"ind, name in self._xcvr_interfaces_id.items(): tx_oid = cwsXcvrTxPowerActual + '.' + ind.strip('.0') tx =",
"import PanoptesGenericSNMPMetricsEnrichmentGroup from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\ import PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS =",
"varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName) interface_id = {} xcvr_index = [] for",
"cwsXcvrRxPowerActual, \"values\": self._xcvr_rx_power_levels }, \"tx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrTxPowerActual, \"values\": self._xcvr_tx_power_levels }",
"rx = self._snmp_connection.bulk_walk(rx_oid) for i in rx: rx_dbm = float(i.value) / 10 rx_power_actual[name]",
"_xcvr_interfaces_id(self): varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName) interface_id = {} xcvr_index = []",
"ind, name in self._xcvr_interfaces_id.items(): rx_oid = cwsXcvrRxPowerActual + '.' + ind.strip('.0') rx =",
"level Metrics \"\"\" from cached_property import threaded_cached_property from yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup from yahoo_panoptes.framework.enrichment",
"module implements a Panoptes Plugin that can poll Ciena Waveserver devices for transceiver",
"varbinds_interface: if varbind_int.index in xcvr_index: interface_id[varbind_int.index] = varbind_int.value return interface_id @threaded_cached_property def _xcvr_rx_power_levels(self):",
"varbinds_int_type: if varbind.value == '4': xcvr_index.append(varbind.index) for varbind_int in varbinds_interface: if varbind_int.index in",
"cwsXcvrRxPowerActual + '.' + ind.strip('.0') rx = self._snmp_connection.bulk_walk(rx_oid) for i in rx: rx_dbm",
"\"\"\" This module implements a Panoptes Plugin that can poll Ciena Waveserver devices",
"= varbind_int.value return interface_id @threaded_cached_property def _xcvr_rx_power_levels(self): rx_power_actual = {} for ind, name",
"rx_dbm = float(i.value) / 10 rx_power_actual[name] = rx_dbm return rx_power_actual @threaded_cached_property def _xcvr_tx_power_levels(self):",
"to enrichment group for the device {}: {}'. format(enrichment_set, self.device_fqdn, repr(e))) self.enrichment_group_set.add_enrichment_group(self.enrichment_group) self._logger.debug('Metrics",
"+ '.' + ind.strip('.0') tx = self._snmp_connection.bulk_walk(tx_oid) for i in tx: tx_dbm =",
"= [ { \"group_name\": \"light_levels\", \"dimensions\": {}, \"metrics\": { \"xcvr_interfaces\": { \"metric_type\": \"gauge\",",
"cwsXcvrTxPowerActual, \"values\": self._xcvr_tx_power_levels } } def _build_metrics_groups_conf(self): self._metrics_groups = [ { \"group_name\": \"light_levels\",",
"}, \"rx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"rx_light_level.$index\" }, \"tx_light_level\": { \"metric_type\": \"gauge\", \"value\":",
"self._snmp_connection.bulk_walk(cwsPortIdName) interface_id = {} xcvr_index = [] for varbind in varbinds_int_type: if varbind.value",
"for ind, name in self._xcvr_interfaces_id.items(): rx_oid = cwsXcvrRxPowerActual + '.' + ind.strip('.0') rx",
"None self._max_repetitions = None self._polling_execute_frequency = None super(CienaPluginWSLightMetricsEnrichment, self).__init__() @property def metrics_enrichment_class(self): return",
"cwsPortIdName = MIB_CIENA_CHASSIS + '.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS + '.3.4.8.6.1.2' cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS",
"for varbind_int in varbinds_interface: if varbind_int.index in xcvr_index: interface_id[varbind_int.index] = varbind_int.value return interface_id",
"for ind, name in self._xcvr_interfaces_id.items(): tx_oid = cwsXcvrTxPowerActual + '.' + ind.strip('.0') tx",
"def get_enrichment(self): self._ciena_model = self._plugin_context.data.resource_metadata.get('model', 'unknown') self._build_metrics_oids_map() self._build_metrics_groups_conf() enrichment_set = { \"oids\": self.oids_map,",
"self._build_metrics_oids_map() self._build_metrics_groups_conf() enrichment_set = { \"oids\": self.oids_map, \"metrics_groups\": self.metrics_groups } try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set))",
"xcvr_index.append(varbind.index) for varbind_int in varbinds_interface: if varbind_int.index in xcvr_index: interface_id[varbind_int.index] = varbind_int.value return",
"\"metrics_groups\": self.metrics_groups } try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set)) except Exception as e: self._logger.error('Error while adding",
"= '.1.3.6.1.4.1.1271' cwsPortIdName = MIB_CIENA_CHASSIS + '.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS + '.3.4.8.6.1.2' cwsXcvrRxPowerActual",
"name in self._xcvr_interfaces_id.items(): rx_oid = cwsXcvrRxPowerActual + '.' + ind.strip('.0') rx = self._snmp_connection.bulk_walk(rx_oid)",
"def __init__(self): self._plugin_context = None self._logger = None self._ciena_model = None self._snmp_connection =",
"self._polling_execute_frequency = None super(CienaPluginWSLightMetricsEnrichment, self).__init__() @property def metrics_enrichment_class(self): return CienaWSLightMetricsEnrichment @threaded_cached_property def _xcvr_interfaces_id(self):",
"adding enrichment set {} to enrichment group for the device {}: {}'. format(enrichment_set,",
"for the device {}: {}'. format(enrichment_set, self.device_fqdn, repr(e))) self.enrichment_group_set.add_enrichment_group(self.enrichment_group) self._logger.debug('Metrics enrichment for device",
"= self._snmp_connection.bulk_walk(cwsPortIdName) interface_id = {} xcvr_index = [] for varbind in varbinds_int_type: if",
"\"values\": self._xcvr_rx_power_levels }, \"tx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrTxPowerActual, \"values\": self._xcvr_tx_power_levels } }",
"self._logger.error('Error while adding enrichment set {} to enrichment group for the device {}:",
"<filename>yahoo_panoptes/plugins/enrichment/generic/snmp/ciena/waveserver/plugin_enrichment_cienaws_light_metrics.py<gh_stars>10-100 \"\"\" This module implements a Panoptes Plugin that can poll Ciena Waveserver",
"} } } ] def get_enrichment(self): self._ciena_model = self._plugin_context.data.resource_metadata.get('model', 'unknown') self._build_metrics_oids_map() self._build_metrics_groups_conf() enrichment_set",
"pass class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def __init__(self): self._plugin_context = None self._logger = None self._ciena_model =",
"+ '.3.4.8.6.1.2' cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.11.1.2' cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.13.1.2' class",
"MIB_CIENA_CHASSIS + '.3.4.8.13.1.2' class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def __init__(self): self._plugin_context = None",
"get_enrichment(self): self._ciena_model = self._plugin_context.data.resource_metadata.get('model', 'unknown') self._build_metrics_oids_map() self._build_metrics_groups_conf() enrichment_set = { \"oids\": self.oids_map, \"metrics_groups\":",
"= None self._logger = None self._ciena_model = None self._snmp_connection = None self._max_repetitions =",
"CienaWSLightMetricsEnrichment @threaded_cached_property def _xcvr_interfaces_id(self): varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName) interface_id = {}",
"poll Ciena Waveserver devices for transceiver light level Metrics \"\"\" from cached_property import",
"for i in rx: rx_dbm = float(i.value) / 10 rx_power_actual[name] = rx_dbm return",
"try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set)) except Exception as e: self._logger.error('Error while adding enrichment set {}",
"in varbinds_int_type: if varbind.value == '4': xcvr_index.append(varbind.index) for varbind_int in varbinds_interface: if varbind_int.index",
"{ \"metric_type\": \"gauge\", \"value\": \"xcvr_interfaces.$index\" }, \"rx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"rx_light_level.$index\" },",
"/ 10 rx_power_actual[name] = rx_dbm return rx_power_actual @threaded_cached_property def _xcvr_tx_power_levels(self): tx_power_actual = {}",
"\"xcvr_interfaces\": { \"metric_type\": \"gauge\", \"value\": \"xcvr_interfaces.$index\" }, \"rx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"rx_light_level.$index\"",
"from cached_property import threaded_cached_property from yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet from",
"\"value\": \"tx_light_level.$index\" } } } ] def get_enrichment(self): self._ciena_model = self._plugin_context.data.resource_metadata.get('model', 'unknown') self._build_metrics_oids_map()",
"} try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set)) except Exception as e: self._logger.error('Error while adding enrichment set",
"self._snmp_connection.bulk_walk(tx_oid) for i in tx: tx_dbm = float(i.value) / 10 tx_power_actual[name] = tx_dbm",
"as e: self._logger.error('Error while adding enrichment set {} to enrichment group for the",
"= self._snmp_connection.bulk_walk(tx_oid) for i in tx: tx_dbm = float(i.value) / 10 tx_power_actual[name] =",
"{ \"method\": \"bulk_walk\", \"oid\": cwsXcvrTxPowerActual, \"values\": self._xcvr_tx_power_levels } } def _build_metrics_groups_conf(self): self._metrics_groups =",
"= cwsXcvrRxPowerActual + '.' + ind.strip('.0') rx = self._snmp_connection.bulk_walk(rx_oid) for i in rx:",
"self._xcvr_tx_power_levels } } def _build_metrics_groups_conf(self): self._metrics_groups = [ { \"group_name\": \"light_levels\", \"dimensions\": {},",
"\"tx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrTxPowerActual, \"values\": self._xcvr_tx_power_levels } } def _build_metrics_groups_conf(self): self._metrics_groups",
"@threaded_cached_property def _xcvr_tx_power_levels(self): tx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items(): tx_oid =",
"\"rx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrRxPowerActual, \"values\": self._xcvr_rx_power_levels }, \"tx_light_level\": { \"method\": \"bulk_walk\",",
"= None self._ciena_model = None self._snmp_connection = None self._max_repetitions = None self._polling_execute_frequency =",
"\"light_levels\", \"dimensions\": {}, \"metrics\": { \"xcvr_interfaces\": { \"metric_type\": \"gauge\", \"value\": \"xcvr_interfaces.$index\" }, \"rx_light_level\":",
"= MIB_CIENA_CHASSIS + '.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS + '.3.4.8.6.1.2' cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS +",
"self._snmp_connection = None self._max_repetitions = None self._polling_execute_frequency = None super(CienaPluginWSLightMetricsEnrichment, self).__init__() @property def",
"10 tx_power_actual[name] = tx_dbm return tx_power_actual def _build_metrics_oids_map(self): self._oids_map = { \"xcvr_interfaces\": {",
"MIB_CIENA_CHASSIS + '.3.4.8.11.1.2' cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.13.1.2' class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin):",
"\"tx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"tx_light_level.$index\" } } } ] def get_enrichment(self): self._ciena_model",
"] def get_enrichment(self): self._ciena_model = self._plugin_context.data.resource_metadata.get('model', 'unknown') self._build_metrics_oids_map() self._build_metrics_groups_conf() enrichment_set = { \"oids\":",
"def _xcvr_rx_power_levels(self): rx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items(): rx_oid = cwsXcvrRxPowerActual",
"+ '.3.4.8.13.1.2' class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def __init__(self): self._plugin_context = None self._logger",
"light level Metrics \"\"\" from cached_property import threaded_cached_property from yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup from",
"in self._xcvr_interfaces_id.items(): tx_oid = cwsXcvrTxPowerActual + '.' + ind.strip('.0') tx = self._snmp_connection.bulk_walk(tx_oid) for",
"return tx_power_actual def _build_metrics_oids_map(self): self._oids_map = { \"xcvr_interfaces\": { \"method\": \"static\", \"values\": self._xcvr_interfaces_id",
"in varbinds_interface: if varbind_int.index in xcvr_index: interface_id[varbind_int.index] = varbind_int.value return interface_id @threaded_cached_property def",
"} } ] def get_enrichment(self): self._ciena_model = self._plugin_context.data.resource_metadata.get('model', 'unknown') self._build_metrics_oids_map() self._build_metrics_groups_conf() enrichment_set =",
"rx: rx_dbm = float(i.value) / 10 rx_power_actual[name] = rx_dbm return rx_power_actual @threaded_cached_property def",
"enrichment group for the device {}: {}'. format(enrichment_set, self.device_fqdn, repr(e))) self.enrichment_group_set.add_enrichment_group(self.enrichment_group) self._logger.debug('Metrics enrichment",
"= { \"oids\": self.oids_map, \"metrics_groups\": self.metrics_groups } try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set)) except Exception as",
"float(i.value) / 10 tx_power_actual[name] = tx_dbm return tx_power_actual def _build_metrics_oids_map(self): self._oids_map = {",
"\"group_name\": \"light_levels\", \"dimensions\": {}, \"metrics\": { \"xcvr_interfaces\": { \"metric_type\": \"gauge\", \"value\": \"xcvr_interfaces.$index\" },",
"== '4': xcvr_index.append(varbind.index) for varbind_int in varbinds_interface: if varbind_int.index in xcvr_index: interface_id[varbind_int.index] =",
"\"method\": \"static\", \"values\": self._xcvr_interfaces_id }, \"rx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrRxPowerActual, \"values\": self._xcvr_rx_power_levels",
"\"method\": \"bulk_walk\", \"oid\": cwsXcvrRxPowerActual, \"values\": self._xcvr_rx_power_levels }, \"tx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrTxPowerActual,",
"\"dimensions\": {}, \"metrics\": { \"xcvr_interfaces\": { \"metric_type\": \"gauge\", \"value\": \"xcvr_interfaces.$index\" }, \"rx_light_level\": {",
"rx_oid = cwsXcvrRxPowerActual + '.' + ind.strip('.0') rx = self._snmp_connection.bulk_walk(rx_oid) for i in",
"This module implements a Panoptes Plugin that can poll Ciena Waveserver devices for",
"devices for transceiver light level Metrics \"\"\" from cached_property import threaded_cached_property from yahoo_panoptes.enrichment.schema.generic.snmp",
"@threaded_cached_property def _xcvr_rx_power_levels(self): rx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items(): rx_oid =",
"'.' + ind.strip('.0') rx = self._snmp_connection.bulk_walk(rx_oid) for i in rx: rx_dbm = float(i.value)",
"Exception as e: self._logger.error('Error while adding enrichment set {} to enrichment group for",
"self._xcvr_interfaces_id.items(): tx_oid = cwsXcvrTxPowerActual + '.' + ind.strip('.0') tx = self._snmp_connection.bulk_walk(tx_oid) for i",
"ind.strip('.0') rx = self._snmp_connection.bulk_walk(rx_oid) for i in rx: rx_dbm = float(i.value) / 10",
"PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271' cwsPortIdName = MIB_CIENA_CHASSIS + '.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS +",
"tx_dbm = float(i.value) / 10 tx_power_actual[name] = tx_dbm return tx_power_actual def _build_metrics_oids_map(self): self._oids_map",
"\"method\": \"bulk_walk\", \"oid\": cwsXcvrTxPowerActual, \"values\": self._xcvr_tx_power_levels } } def _build_metrics_groups_conf(self): self._metrics_groups = [",
"self._logger = None self._ciena_model = None self._snmp_connection = None self._max_repetitions = None self._polling_execute_frequency",
"interface_id[varbind_int.index] = varbind_int.value return interface_id @threaded_cached_property def _xcvr_rx_power_levels(self): rx_power_actual = {} for ind,",
"= {} for ind, name in self._xcvr_interfaces_id.items(): tx_oid = cwsXcvrTxPowerActual + '.' +",
"{} for ind, name in self._xcvr_interfaces_id.items(): tx_oid = cwsXcvrTxPowerActual + '.' + ind.strip('.0')",
"tx_power_actual def _build_metrics_oids_map(self): self._oids_map = { \"xcvr_interfaces\": { \"method\": \"static\", \"values\": self._xcvr_interfaces_id },",
"\"static\", \"values\": self._xcvr_interfaces_id }, \"rx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrRxPowerActual, \"values\": self._xcvr_rx_power_levels },",
"\"oid\": cwsXcvrTxPowerActual, \"values\": self._xcvr_tx_power_levels } } def _build_metrics_groups_conf(self): self._metrics_groups = [ { \"group_name\":",
"'.1.3.6.1.4.1.1271' cwsPortIdName = MIB_CIENA_CHASSIS + '.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS + '.3.4.8.6.1.2' cwsXcvrRxPowerActual =",
"rx_power_actual @threaded_cached_property def _xcvr_tx_power_levels(self): tx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items(): tx_oid",
"self._build_metrics_groups_conf() enrichment_set = { \"oids\": self.oids_map, \"metrics_groups\": self.metrics_groups } try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set)) except",
"set {} to enrichment group for the device {}: {}'. format(enrichment_set, self.device_fqdn, repr(e)))",
"transceiver light level Metrics \"\"\" from cached_property import threaded_cached_property from yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup",
"implements a Panoptes Plugin that can poll Ciena Waveserver devices for transceiver light",
"except Exception as e: self._logger.error('Error while adding enrichment set {} to enrichment group",
"'unknown') self._build_metrics_oids_map() self._build_metrics_groups_conf() enrichment_set = { \"oids\": self.oids_map, \"metrics_groups\": self.metrics_groups } try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn,",
"def _xcvr_interfaces_id(self): varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName) interface_id = {} xcvr_index =",
"'.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS + '.3.4.8.6.1.2' cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.11.1.2' cwsXcvrTxPowerActual =",
"return interface_id @threaded_cached_property def _xcvr_rx_power_levels(self): rx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items():",
"self.oids_map, \"metrics_groups\": self.metrics_groups } try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set)) except Exception as e: self._logger.error('Error while",
"self._max_repetitions = None self._polling_execute_frequency = None super(CienaPluginWSLightMetricsEnrichment, self).__init__() @property def metrics_enrichment_class(self): return CienaWSLightMetricsEnrichment",
"self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set)) except Exception as e: self._logger.error('Error while adding enrichment set {} to",
"enrichment_set)) except Exception as e: self._logger.error('Error while adding enrichment set {} to enrichment",
"Waveserver devices for transceiver light level Metrics \"\"\" from cached_property import threaded_cached_property from",
"def _build_metrics_oids_map(self): self._oids_map = { \"xcvr_interfaces\": { \"method\": \"static\", \"values\": self._xcvr_interfaces_id }, \"rx_light_level\":",
"interface_id @threaded_cached_property def _xcvr_rx_power_levels(self): rx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items(): rx_oid",
"{}'. format(enrichment_set, self.device_fqdn, repr(e))) self.enrichment_group_set.add_enrichment_group(self.enrichment_group) self._logger.debug('Metrics enrichment for device {}: {}'.format(self.device_fqdn, self.enrichment_group_set)) return",
"= None super(CienaPluginWSLightMetricsEnrichment, self).__init__() @property def metrics_enrichment_class(self): return CienaWSLightMetricsEnrichment @threaded_cached_property def _xcvr_interfaces_id(self): varbinds_int_type",
"= {} for ind, name in self._xcvr_interfaces_id.items(): rx_oid = cwsXcvrRxPowerActual + '.' +",
"MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271' cwsPortIdName = MIB_CIENA_CHASSIS + '.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS + '.3.4.8.6.1.2'",
"tx: tx_dbm = float(i.value) / 10 tx_power_actual[name] = tx_dbm return tx_power_actual def _build_metrics_oids_map(self):",
"\\ import PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271' cwsPortIdName = MIB_CIENA_CHASSIS + '.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType =",
"} ] def get_enrichment(self): self._ciena_model = self._plugin_context.data.resource_metadata.get('model', 'unknown') self._build_metrics_oids_map() self._build_metrics_groups_conf() enrichment_set = {",
"return CienaWSLightMetricsEnrichment @threaded_cached_property def _xcvr_interfaces_id(self): varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName) interface_id =",
"\"rx_light_level.$index\" }, \"tx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"tx_light_level.$index\" } } } ] def",
"{} xcvr_index = [] for varbind in varbinds_int_type: if varbind.value == '4': xcvr_index.append(varbind.index)",
"\"value\": \"rx_light_level.$index\" }, \"tx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"tx_light_level.$index\" } } } ]",
"return rx_power_actual @threaded_cached_property def _xcvr_tx_power_levels(self): tx_power_actual = {} for ind, name in self._xcvr_interfaces_id.items():",
"= MIB_CIENA_CHASSIS + '.3.4.8.13.1.2' class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def __init__(self): self._plugin_context =",
"enrichment_set = { \"oids\": self.oids_map, \"metrics_groups\": self.metrics_groups } try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set)) except Exception",
"self).__init__() @property def metrics_enrichment_class(self): return CienaWSLightMetricsEnrichment @threaded_cached_property def _xcvr_interfaces_id(self): varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface",
"Metrics \"\"\" from cached_property import threaded_cached_property from yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup from yahoo_panoptes.framework.enrichment import",
"xcvr_index: interface_id[varbind_int.index] = varbind_int.value return interface_id @threaded_cached_property def _xcvr_rx_power_levels(self): rx_power_actual = {} for",
"if varbind_int.index in xcvr_index: interface_id[varbind_int.index] = varbind_int.value return interface_id @threaded_cached_property def _xcvr_rx_power_levels(self): rx_power_actual",
"\"metric_type\": \"gauge\", \"value\": \"tx_light_level.$index\" } } } ] def get_enrichment(self): self._ciena_model = self._plugin_context.data.resource_metadata.get('model',",
"enrichment set {} to enrichment group for the device {}: {}'. format(enrichment_set, self.device_fqdn,",
"= rx_dbm return rx_power_actual @threaded_cached_property def _xcvr_tx_power_levels(self): tx_power_actual = {} for ind, name",
"{ \"metric_type\": \"gauge\", \"value\": \"rx_light_level.$index\" }, \"tx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"tx_light_level.$index\" }",
"self._xcvr_rx_power_levels }, \"tx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrTxPowerActual, \"values\": self._xcvr_tx_power_levels } } def",
"cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.13.1.2' class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def __init__(self): self._plugin_context",
"= None self._polling_execute_frequency = None super(CienaPluginWSLightMetricsEnrichment, self).__init__() @property def metrics_enrichment_class(self): return CienaWSLightMetricsEnrichment @threaded_cached_property",
"interface_id = {} xcvr_index = [] for varbind in varbinds_int_type: if varbind.value ==",
"\"xcvr_interfaces.$index\" }, \"rx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"rx_light_level.$index\" }, \"tx_light_level\": { \"metric_type\": \"gauge\",",
"that can poll Ciena Waveserver devices for transceiver light level Metrics \"\"\" from",
"MIB_CIENA_CHASSIS + '.3.4.8.6.1.2' cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.11.1.2' cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.13.1.2'",
"CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def __init__(self): self._plugin_context = None self._logger = None self._ciena_model = None self._snmp_connection",
"+ ind.strip('.0') tx = self._snmp_connection.bulk_walk(tx_oid) for i in tx: tx_dbm = float(i.value) /",
"CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def __init__(self): self._plugin_context = None self._logger = None self._ciena_model",
"+ '.3.4.8.11.1.2' cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.13.1.2' class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def",
"= float(i.value) / 10 tx_power_actual[name] = tx_dbm return tx_power_actual def _build_metrics_oids_map(self): self._oids_map =",
"+ '.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS + '.3.4.8.6.1.2' cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.11.1.2' cwsXcvrTxPowerActual",
"self._plugin_context = None self._logger = None self._ciena_model = None self._snmp_connection = None self._max_repetitions",
"= self._plugin_context.data.resource_metadata.get('model', 'unknown') self._build_metrics_oids_map() self._build_metrics_groups_conf() enrichment_set = { \"oids\": self.oids_map, \"metrics_groups\": self.metrics_groups }",
"= {} xcvr_index = [] for varbind in varbinds_int_type: if varbind.value == '4':",
"= [] for varbind in varbinds_int_type: if varbind.value == '4': xcvr_index.append(varbind.index) for varbind_int",
"tx = self._snmp_connection.bulk_walk(tx_oid) for i in tx: tx_dbm = float(i.value) / 10 tx_power_actual[name]",
"{ \"xcvr_interfaces\": { \"metric_type\": \"gauge\", \"value\": \"xcvr_interfaces.$index\" }, \"rx_light_level\": { \"metric_type\": \"gauge\", \"value\":",
"varbind.value == '4': xcvr_index.append(varbind.index) for varbind_int in varbinds_interface: if varbind_int.index in xcvr_index: interface_id[varbind_int.index]",
"class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def __init__(self): self._plugin_context = None self._logger = None self._ciena_model = None",
"e: self._logger.error('Error while adding enrichment set {} to enrichment group for the device",
"a Panoptes Plugin that can poll Ciena Waveserver devices for transceiver light level",
"varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName) interface_id = {} xcvr_index = [] for varbind in varbinds_int_type:",
"can poll Ciena Waveserver devices for transceiver light level Metrics \"\"\" from cached_property",
"'.' + ind.strip('.0') tx = self._snmp_connection.bulk_walk(tx_oid) for i in tx: tx_dbm = float(i.value)",
"self.metrics_groups } try: self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set)) except Exception as e: self._logger.error('Error while adding enrichment",
"\"metrics\": { \"xcvr_interfaces\": { \"metric_type\": \"gauge\", \"value\": \"xcvr_interfaces.$index\" }, \"rx_light_level\": { \"metric_type\": \"gauge\",",
"format(enrichment_set, self.device_fqdn, repr(e))) self.enrichment_group_set.add_enrichment_group(self.enrichment_group) self._logger.debug('Metrics enrichment for device {}: {}'.format(self.device_fqdn, self.enrichment_group_set)) return self.enrichment_group_set",
"self._snmp_connection.bulk_walk(rx_oid) for i in rx: rx_dbm = float(i.value) / 10 rx_power_actual[name] = rx_dbm",
"[ { \"group_name\": \"light_levels\", \"dimensions\": {}, \"metrics\": { \"xcvr_interfaces\": { \"metric_type\": \"gauge\", \"value\":",
"{ \"group_name\": \"light_levels\", \"dimensions\": {}, \"metrics\": { \"xcvr_interfaces\": { \"metric_type\": \"gauge\", \"value\": \"xcvr_interfaces.$index\"",
"'.3.4.8.6.1.2' cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.11.1.2' cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.13.1.2' class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup):",
"varbind_int in varbinds_interface: if varbind_int.index in xcvr_index: interface_id[varbind_int.index] = varbind_int.value return interface_id @threaded_cached_property",
"if varbind.value == '4': xcvr_index.append(varbind.index) for varbind_int in varbinds_interface: if varbind_int.index in xcvr_index:",
"{ \"method\": \"bulk_walk\", \"oid\": cwsXcvrRxPowerActual, \"values\": self._xcvr_rx_power_levels }, \"tx_light_level\": { \"method\": \"bulk_walk\", \"oid\":",
"device {}: {}'. format(enrichment_set, self.device_fqdn, repr(e))) self.enrichment_group_set.add_enrichment_group(self.enrichment_group) self._logger.debug('Metrics enrichment for device {}: {}'.format(self.device_fqdn,",
"varbind_int.index in xcvr_index: interface_id[varbind_int.index] = varbind_int.value return interface_id @threaded_cached_property def _xcvr_rx_power_levels(self): rx_power_actual =",
"yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\ import PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS",
"cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.11.1.2' cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.13.1.2' class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass",
"MIB_CIENA_CHASSIS + '.3.4.7.4.1.2' cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS + '.3.4.8.6.1.2' cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.11.1.2'",
"{ \"xcvr_interfaces\": { \"method\": \"static\", \"values\": self._xcvr_interfaces_id }, \"rx_light_level\": { \"method\": \"bulk_walk\", \"oid\":",
"}, \"tx_light_level\": { \"method\": \"bulk_walk\", \"oid\": cwsXcvrTxPowerActual, \"values\": self._xcvr_tx_power_levels } } def _build_metrics_groups_conf(self):",
"}, \"tx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"tx_light_level.$index\" } } } ] def get_enrichment(self):",
"while adding enrichment set {} to enrichment group for the device {}: {}'.",
"def metrics_enrichment_class(self): return CienaWSLightMetricsEnrichment @threaded_cached_property def _xcvr_interfaces_id(self): varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType) varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName)",
"None self._polling_execute_frequency = None super(CienaPluginWSLightMetricsEnrichment, self).__init__() @property def metrics_enrichment_class(self): return CienaWSLightMetricsEnrichment @threaded_cached_property def",
"import PanoptesEnrichmentSet from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\ import PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271' cwsPortIdName = MIB_CIENA_CHASSIS",
"for i in tx: tx_dbm = float(i.value) / 10 tx_power_actual[name] = tx_dbm return",
"Panoptes Plugin that can poll Ciena Waveserver devices for transceiver light level Metrics",
"class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def __init__(self): self._plugin_context = None self._logger = None",
"None self._snmp_connection = None self._max_repetitions = None self._polling_execute_frequency = None super(CienaPluginWSLightMetricsEnrichment, self).__init__() @property",
"PanoptesGenericSNMPMetricsEnrichmentGroup from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\ import PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271'",
"= float(i.value) / 10 rx_power_actual[name] = rx_dbm return rx_power_actual @threaded_cached_property def _xcvr_tx_power_levels(self): tx_power_actual",
"name in self._xcvr_interfaces_id.items(): tx_oid = cwsXcvrTxPowerActual + '.' + ind.strip('.0') tx = self._snmp_connection.bulk_walk(tx_oid)",
"self._oids_map = { \"xcvr_interfaces\": { \"method\": \"static\", \"values\": self._xcvr_interfaces_id }, \"rx_light_level\": { \"method\":",
"} def _build_metrics_groups_conf(self): self._metrics_groups = [ { \"group_name\": \"light_levels\", \"dimensions\": {}, \"metrics\": {",
"PanoptesEnrichmentSet from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\ import PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271' cwsPortIdName = MIB_CIENA_CHASSIS +",
"'.3.4.8.11.1.2' cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.13.1.2' class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def __init__(self):",
"tx_oid = cwsXcvrTxPowerActual + '.' + ind.strip('.0') tx = self._snmp_connection.bulk_walk(tx_oid) for i in",
"'.3.4.8.13.1.2' class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup): pass class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin): def __init__(self): self._plugin_context = None self._logger =",
"from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\ import PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271' cwsPortIdName = MIB_CIENA_CHASSIS + '.3.4.7.4.1.2'",
"float(i.value) / 10 rx_power_actual[name] = rx_dbm return rx_power_actual @threaded_cached_property def _xcvr_tx_power_levels(self): tx_power_actual =",
"\"value\": \"xcvr_interfaces.$index\" }, \"rx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"rx_light_level.$index\" }, \"tx_light_level\": { \"metric_type\":",
"= cwsXcvrTxPowerActual + '.' + ind.strip('.0') tx = self._snmp_connection.bulk_walk(tx_oid) for i in tx:",
"\"rx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"rx_light_level.$index\" }, \"tx_light_level\": { \"metric_type\": \"gauge\", \"value\": \"tx_light_level.$index\"",
"i in rx: rx_dbm = float(i.value) / 10 rx_power_actual[name] = rx_dbm return rx_power_actual",
"group for the device {}: {}'. format(enrichment_set, self.device_fqdn, repr(e))) self.enrichment_group_set.add_enrichment_group(self.enrichment_group) self._logger.debug('Metrics enrichment for",
"from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \\ import PanoptesEnrichmentGenericSNMPPlugin MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271' cwsPortIdName",
"the device {}: {}'. format(enrichment_set, self.device_fqdn, repr(e))) self.enrichment_group_set.add_enrichment_group(self.enrichment_group) self._logger.debug('Metrics enrichment for device {}:",
"\"gauge\", \"value\": \"tx_light_level.$index\" } } } ] def get_enrichment(self): self._ciena_model = self._plugin_context.data.resource_metadata.get('model', 'unknown')",
"self._plugin_context.data.resource_metadata.get('model', 'unknown') self._build_metrics_oids_map() self._build_metrics_groups_conf() enrichment_set = { \"oids\": self.oids_map, \"metrics_groups\": self.metrics_groups } try:"
] |
[
"parm=get_param_str()) @fpbp.route('/c2_server') @fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>') def c2_server(exp=None): return flask.render_template('c2_server.html', exp = exp, parm =",
"get_param_str()) @fpbp.route('/tops') @fpbp.route('/tops/') @fpbp.route('/tops/<exp>') def tops(exp=None): return flask.render_template('tops.html', exp=exp, parm = get_param_str()) @fpbp.route('/ddos')",
"c2_server(exp=None): return flask.render_template('c2_server.html', exp = exp, parm = get_param_str()) @fpbp.route('/malware') @fpbp.route('/malware/') @fpbp.route('/malware/<exp>') def",
"@fpbp.route('/') @fpbp.route('/overview') def overview(): return flask.render_template('overview.html', parm = get_param_str()) @fpbp.route('/graph') @fpbp.route('/graph/') @fpbp.route('/graph/<exp>') def",
"= get_param_str()) @fpbp.route('/botnet') @fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>') def botnet(exp=None): return flask.render_template('botnet.html', exp=exp, parm=get_param_str()) @fpbp.route('/c2_server') @fpbp.route('/c2_server/')",
"def overview(): return flask.render_template('overview.html', parm = get_param_str()) @fpbp.route('/graph') @fpbp.route('/graph/') @fpbp.route('/graph/<exp>') def g1(exp=None): return",
"= get_param_str()) @fpbp.route('/bot') @fpbp.route('/bot/') @fpbp.route('/bot/<exp>') def bot(exp=None): return flask.render_template('bot.html', exp=exp, parm = get_param_str())",
"exp=exp, parm = get_param_str()) @fpbp.route('/botnet') @fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>') def botnet(exp=None): return flask.render_template('botnet.html', exp=exp, parm=get_param_str())",
"def attack(exp=None): return flask.render_template('attack.html', exp=exp, parm = get_param_str()) @fpbp.route('/bot') @fpbp.route('/bot/') @fpbp.route('/bot/<exp>') def bot(exp=None):",
"@fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>') def vulnerable_uri(exp=None): return flask.render_template('vulnerable_uri.html', exp=exp, parm = get_param_str()) @fpbp.route('/tops') @fpbp.route('/tops/')",
"the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in",
"@fpbp.route('/c2_server') @fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>') def c2_server(exp=None): return flask.render_template('c2_server.html', exp = exp, parm = get_param_str())",
"flask.render_template('bot.html', exp=exp, parm = get_param_str()) @fpbp.route('/botnet') @fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>') def botnet(exp=None): return flask.render_template('botnet.html', exp=exp,",
"parm = get_param_str()) @fpbp.route('/graph') @fpbp.route('/graph/') @fpbp.route('/graph/<exp>') def g1(exp=None): return flask.render_template('graphs.html', exp=exp, parm =",
"get_param_str()) @fpbp.route('/graph') @fpbp.route('/graph/') @fpbp.route('/graph/<exp>') def g1(exp=None): return flask.render_template('graphs.html', exp=exp, parm = get_param_str()) @fpbp.route('/attack')",
"exp, parm = get_param_str()) @fpbp.route('/malware') @fpbp.route('/malware/') @fpbp.route('/malware/<exp>') def malware(exp=None): return flask.render_template('malware.html', exp=exp, parm=get_param_str())",
"@fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>') def botnet(exp=None): return flask.render_template('botnet.html', exp=exp, parm=get_param_str()) @fpbp.route('/c2_server') @fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>') def c2_server(exp=None):",
"@fpbp.route('/graph/') @fpbp.route('/graph/<exp>') def g1(exp=None): return flask.render_template('graphs.html', exp=exp, parm = get_param_str()) @fpbp.route('/attack') @fpbp.route('/attack/') @fpbp.route('/attack/<exp>')",
"get_param_str()) @fpbp.route('/fast_flux') @fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>') def fast_flux(exp=None): return flask.render_template('fast_flux.html', exp=exp, parm = get_param_str()) @fpbp.route('/vulnerable_uri')",
"CONDITIONS OF ANY KIND, either express or implied. See the License for the",
"limitations under the License. \"\"\" from flask import Flask, Blueprint, request import flask",
"of Luxembourg Licensed under the Apache License, Version 2.0 (the \"License\"); you may",
"= get_param_str()) @fpbp.route('/attack') @fpbp.route('/attack/') @fpbp.route('/attack/<exp>') def attack(exp=None): return flask.render_template('attack.html', exp=exp, parm = get_param_str())",
"def botnet(exp=None): return flask.render_template('botnet.html', exp=exp, parm=get_param_str()) @fpbp.route('/c2_server') @fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>') def c2_server(exp=None): return flask.render_template('c2_server.html',",
"exp=exp, parm=get_param_str()) @fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>') def spam_campaign(exp=None): return flask.render_template('spam_campaign.html', exp=exp, parm = get_param_str())",
"OR CONDITIONS OF ANY KIND, either express or implied. See the License for",
"License. \"\"\" from flask import Flask, Blueprint, request import flask import urllib fpbp",
"\"\"\" (c) Copyright 2015 <NAME>, SnT, University of Luxembourg Licensed under the Apache",
"OF ANY KIND, either express or implied. See the License for the specific",
"to in writing, software distributed under the License is distributed on an \"AS",
"botnet(exp=None): return flask.render_template('botnet.html', exp=exp, parm=get_param_str()) @fpbp.route('/c2_server') @fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>') def c2_server(exp=None): return flask.render_template('c2_server.html', exp",
"parm=get_param_str()) @fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>') def spam_campaign(exp=None): return flask.render_template('spam_campaign.html', exp=exp, parm = get_param_str()) @fpbp.route('/fast_flux')",
"= get_param_str()) @fpbp.route('/tops') @fpbp.route('/tops/') @fpbp.route('/tops/<exp>') def tops(exp=None): return flask.render_template('tops.html', exp=exp, parm = get_param_str())",
"distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"bot(exp=None): return flask.render_template('bot.html', exp=exp, parm = get_param_str()) @fpbp.route('/botnet') @fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>') def botnet(exp=None): return",
"parm = get_param_str()) @fpbp.route('/botnet') @fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>') def botnet(exp=None): return flask.render_template('botnet.html', exp=exp, parm=get_param_str()) @fpbp.route('/c2_server')",
"not use this file except in compliance with the License. You may obtain",
"res @fpbp.route('/') @fpbp.route('/overview') def overview(): return flask.render_template('overview.html', parm = get_param_str()) @fpbp.route('/graph') @fpbp.route('/graph/') @fpbp.route('/graph/<exp>')",
"License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required",
"@fpbp.route('/tops/') @fpbp.route('/tops/<exp>') def tops(exp=None): return flask.render_template('tops.html', exp=exp, parm = get_param_str()) @fpbp.route('/ddos') def ddos():",
"return flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str()) @fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>') def spam_campaign(exp=None): return flask.render_template('spam_campaign.html', exp=exp, parm",
"flask.render_template('c2_server.html', exp = exp, parm = get_param_str()) @fpbp.route('/malware') @fpbp.route('/malware/') @fpbp.route('/malware/<exp>') def malware(exp=None): return",
"parm=get_param_str()) @fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>') def malicious_uri(exp=None): return flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str()) @fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>')",
"= exp, parm = get_param_str()) @fpbp.route('/malware') @fpbp.route('/malware/') @fpbp.route('/malware/<exp>') def malware(exp=None): return flask.render_template('malware.html', exp=exp,",
"def spam_campaign(exp=None): return flask.render_template('spam_campaign.html', exp=exp, parm = get_param_str()) @fpbp.route('/fast_flux') @fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>') def fast_flux(exp=None):",
"except in compliance with the License. You may obtain a copy of the",
"may not use this file except in compliance with the License. You may",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the",
"def malware(exp=None): return flask.render_template('malware.html', exp=exp, parm=get_param_str()) @fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>') def malicious_uri(exp=None): return flask.render_template('malicious_uri.html',",
"<NAME>, SnT, University of Luxembourg Licensed under the Apache License, Version 2.0 (the",
"governing permissions and limitations under the License. \"\"\" from flask import Flask, Blueprint,",
"flask.render_template('vulnerable_uri.html', exp=exp, parm = get_param_str()) @fpbp.route('/tops') @fpbp.route('/tops/') @fpbp.route('/tops/<exp>') def tops(exp=None): return flask.render_template('tops.html', exp=exp,",
"under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR",
"permissions and limitations under the License. \"\"\" from flask import Flask, Blueprint, request",
"Luxembourg Licensed under the Apache License, Version 2.0 (the \"License\"); you may not",
"import urllib fpbp = Blueprint('frontpage', __name__, template_folder=\"templ\") def get_param_str(): res = urllib.parse.urlencode(request.args) if",
"@fpbp.route('/bot/') @fpbp.route('/bot/<exp>') def bot(exp=None): return flask.render_template('bot.html', exp=exp, parm = get_param_str()) @fpbp.route('/botnet') @fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>')",
"flask.render_template('spam_campaign.html', exp=exp, parm = get_param_str()) @fpbp.route('/fast_flux') @fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>') def fast_flux(exp=None): return flask.render_template('fast_flux.html', exp=exp,",
"an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"@fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>') def spam_campaign(exp=None): return flask.render_template('spam_campaign.html', exp=exp, parm = get_param_str()) @fpbp.route('/fast_flux') @fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>')",
"parm = get_param_str()) @fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>') def vulnerable_uri(exp=None): return flask.render_template('vulnerable_uri.html', exp=exp, parm =",
"@fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>') def malicious_uri(exp=None): return flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str()) @fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>') def spam_campaign(exp=None):",
"return flask.render_template('attack.html', exp=exp, parm = get_param_str()) @fpbp.route('/bot') @fpbp.route('/bot/') @fpbp.route('/bot/<exp>') def bot(exp=None): return flask.render_template('bot.html',",
"specific language governing permissions and limitations under the License. \"\"\" from flask import",
"@fpbp.route('/botnet') @fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>') def botnet(exp=None): return flask.render_template('botnet.html', exp=exp, parm=get_param_str()) @fpbp.route('/c2_server') @fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>') def",
"malware(exp=None): return flask.render_template('malware.html', exp=exp, parm=get_param_str()) @fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>') def malicious_uri(exp=None): return flask.render_template('malicious_uri.html', exp=exp,",
"@fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>') def fast_flux(exp=None): return flask.render_template('fast_flux.html', exp=exp, parm = get_param_str()) @fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>')",
"g1(exp=None): return flask.render_template('graphs.html', exp=exp, parm = get_param_str()) @fpbp.route('/attack') @fpbp.route('/attack/') @fpbp.route('/attack/<exp>') def attack(exp=None): return",
"def vulnerable_uri(exp=None): return flask.render_template('vulnerable_uri.html', exp=exp, parm = get_param_str()) @fpbp.route('/tops') @fpbp.route('/tops/') @fpbp.route('/tops/<exp>') def tops(exp=None):",
"flask import Flask, Blueprint, request import flask import urllib fpbp = Blueprint('frontpage', __name__,",
"urllib fpbp = Blueprint('frontpage', __name__, template_folder=\"templ\") def get_param_str(): res = urllib.parse.urlencode(request.args) if res",
"and limitations under the License. \"\"\" from flask import Flask, Blueprint, request import",
"@fpbp.route('/bot') @fpbp.route('/bot/') @fpbp.route('/bot/<exp>') def bot(exp=None): return flask.render_template('bot.html', exp=exp, parm = get_param_str()) @fpbp.route('/botnet') @fpbp.route('/botnet/')",
"obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law",
"from flask import Flask, Blueprint, request import flask import urllib fpbp = Blueprint('frontpage',",
"the License for the specific language governing permissions and limitations under the License.",
"flask.render_template('attack.html', exp=exp, parm = get_param_str()) @fpbp.route('/bot') @fpbp.route('/bot/') @fpbp.route('/bot/<exp>') def bot(exp=None): return flask.render_template('bot.html', exp=exp,",
"ANY KIND, either express or implied. See the License for the specific language",
"return flask.render_template('vulnerable_uri.html', exp=exp, parm = get_param_str()) @fpbp.route('/tops') @fpbp.route('/tops/') @fpbp.route('/tops/<exp>') def tops(exp=None): return flask.render_template('tops.html',",
"Copyright 2015 <NAME>, SnT, University of Luxembourg Licensed under the Apache License, Version",
"vulnerable_uri(exp=None): return flask.render_template('vulnerable_uri.html', exp=exp, parm = get_param_str()) @fpbp.route('/tops') @fpbp.route('/tops/') @fpbp.route('/tops/<exp>') def tops(exp=None): return",
"@fpbp.route('/graph/<exp>') def g1(exp=None): return flask.render_template('graphs.html', exp=exp, parm = get_param_str()) @fpbp.route('/attack') @fpbp.route('/attack/') @fpbp.route('/attack/<exp>') def",
"return flask.render_template('bot.html', exp=exp, parm = get_param_str()) @fpbp.route('/botnet') @fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>') def botnet(exp=None): return flask.render_template('botnet.html',",
"malicious_uri(exp=None): return flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str()) @fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>') def spam_campaign(exp=None): return flask.render_template('spam_campaign.html', exp=exp,",
"request import flask import urllib fpbp = Blueprint('frontpage', __name__, template_folder=\"templ\") def get_param_str(): res",
"file except in compliance with the License. You may obtain a copy of",
"License for the specific language governing permissions and limitations under the License. \"\"\"",
"@fpbp.route('/malware/') @fpbp.route('/malware/<exp>') def malware(exp=None): return flask.render_template('malware.html', exp=exp, parm=get_param_str()) @fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>') def malicious_uri(exp=None):",
"def malicious_uri(exp=None): return flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str()) @fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>') def spam_campaign(exp=None): return flask.render_template('spam_campaign.html',",
"Unless required by applicable law or agreed to in writing, software distributed under",
"= get_param_str()) @fpbp.route('/fast_flux') @fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>') def fast_flux(exp=None): return flask.render_template('fast_flux.html', exp=exp, parm = get_param_str())",
"Flask, Blueprint, request import flask import urllib fpbp = Blueprint('frontpage', __name__, template_folder=\"templ\") def",
"License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing,",
"\"?\"+res else: return res @fpbp.route('/') @fpbp.route('/overview') def overview(): return flask.render_template('overview.html', parm = get_param_str())",
"2.0 (the \"License\"); you may not use this file except in compliance with",
"\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"return flask.render_template('botnet.html', exp=exp, parm=get_param_str()) @fpbp.route('/c2_server') @fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>') def c2_server(exp=None): return flask.render_template('c2_server.html', exp =",
"get_param_str()) @fpbp.route('/malware') @fpbp.route('/malware/') @fpbp.route('/malware/<exp>') def malware(exp=None): return flask.render_template('malware.html', exp=exp, parm=get_param_str()) @fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>')",
"the specific language governing permissions and limitations under the License. \"\"\" from flask",
"flask.render_template('malware.html', exp=exp, parm=get_param_str()) @fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>') def malicious_uri(exp=None): return flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str()) @fpbp.route('/spam_campaign')",
"See the License for the specific language governing permissions and limitations under the",
"= get_param_str()) @fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>') def vulnerable_uri(exp=None): return flask.render_template('vulnerable_uri.html', exp=exp, parm = get_param_str())",
"Blueprint('frontpage', __name__, template_folder=\"templ\") def get_param_str(): res = urllib.parse.urlencode(request.args) if res != \"\": return",
"@fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>') def spam_campaign(exp=None): return flask.render_template('spam_campaign.html', exp=exp, parm = get_param_str()) @fpbp.route('/fast_flux') @fpbp.route('/fast_flux/')",
"@fpbp.route('/tops') @fpbp.route('/tops/') @fpbp.route('/tops/<exp>') def tops(exp=None): return flask.render_template('tops.html', exp=exp, parm = get_param_str()) @fpbp.route('/ddos') def",
"University of Luxembourg Licensed under the Apache License, Version 2.0 (the \"License\"); you",
"copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed",
"return flask.render_template('overview.html', parm = get_param_str()) @fpbp.route('/graph') @fpbp.route('/graph/') @fpbp.route('/graph/<exp>') def g1(exp=None): return flask.render_template('graphs.html', exp=exp,",
"get_param_str()) @fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>') def vulnerable_uri(exp=None): return flask.render_template('vulnerable_uri.html', exp=exp, parm = get_param_str()) @fpbp.route('/tops')",
"@fpbp.route('/graph') @fpbp.route('/graph/') @fpbp.route('/graph/<exp>') def g1(exp=None): return flask.render_template('graphs.html', exp=exp, parm = get_param_str()) @fpbp.route('/attack') @fpbp.route('/attack/')",
"@fpbp.route('/botnet/<exp>') def botnet(exp=None): return flask.render_template('botnet.html', exp=exp, parm=get_param_str()) @fpbp.route('/c2_server') @fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>') def c2_server(exp=None): return",
"exp=exp, parm = get_param_str()) @fpbp.route('/tops') @fpbp.route('/tops/') @fpbp.route('/tops/<exp>') def tops(exp=None): return flask.render_template('tops.html', exp=exp, parm",
"@fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>') def malicious_uri(exp=None): return flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str()) @fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>') def",
"return flask.render_template('graphs.html', exp=exp, parm = get_param_str()) @fpbp.route('/attack') @fpbp.route('/attack/') @fpbp.route('/attack/<exp>') def attack(exp=None): return flask.render_template('attack.html',",
"parm = get_param_str()) @fpbp.route('/malware') @fpbp.route('/malware/') @fpbp.route('/malware/<exp>') def malware(exp=None): return flask.render_template('malware.html', exp=exp, parm=get_param_str()) @fpbp.route('/malicious_uri')",
"the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless",
"the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS",
"import Flask, Blueprint, request import flask import urllib fpbp = Blueprint('frontpage', __name__, template_folder=\"templ\")",
"\"\"\" from flask import Flask, Blueprint, request import flask import urllib fpbp =",
"flask.render_template('overview.html', parm = get_param_str()) @fpbp.route('/graph') @fpbp.route('/graph/') @fpbp.route('/graph/<exp>') def g1(exp=None): return flask.render_template('graphs.html', exp=exp, parm",
"License, Version 2.0 (the \"License\"); you may not use this file except in",
"compliance with the License. You may obtain a copy of the License at",
"under the License. \"\"\" from flask import Flask, Blueprint, request import flask import",
"(the \"License\"); you may not use this file except in compliance with the",
"this file except in compliance with the License. You may obtain a copy",
"spam_campaign(exp=None): return flask.render_template('spam_campaign.html', exp=exp, parm = get_param_str()) @fpbp.route('/fast_flux') @fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>') def fast_flux(exp=None): return",
"def g1(exp=None): return flask.render_template('graphs.html', exp=exp, parm = get_param_str()) @fpbp.route('/attack') @fpbp.route('/attack/') @fpbp.route('/attack/<exp>') def attack(exp=None):",
"flask.render_template('fast_flux.html', exp=exp, parm = get_param_str()) @fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>') def vulnerable_uri(exp=None): return flask.render_template('vulnerable_uri.html', exp=exp,",
"return \"?\"+res else: return res @fpbp.route('/') @fpbp.route('/overview') def overview(): return flask.render_template('overview.html', parm =",
"overview(): return flask.render_template('overview.html', parm = get_param_str()) @fpbp.route('/graph') @fpbp.route('/graph/') @fpbp.route('/graph/<exp>') def g1(exp=None): return flask.render_template('graphs.html',",
"\"License\"); you may not use this file except in compliance with the License.",
"express or implied. See the License for the specific language governing permissions and",
"def tops(exp=None): return flask.render_template('tops.html', exp=exp, parm = get_param_str()) @fpbp.route('/ddos') def ddos(): return flask.render_template('ddos.html')",
"exp = exp, parm = get_param_str()) @fpbp.route('/malware') @fpbp.route('/malware/') @fpbp.route('/malware/<exp>') def malware(exp=None): return flask.render_template('malware.html',",
"is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"= get_param_str()) @fpbp.route('/graph') @fpbp.route('/graph/') @fpbp.route('/graph/<exp>') def g1(exp=None): return flask.render_template('graphs.html', exp=exp, parm = get_param_str())",
"the License. \"\"\" from flask import Flask, Blueprint, request import flask import urllib",
"get_param_str(): res = urllib.parse.urlencode(request.args) if res != \"\": return \"?\"+res else: return res",
"!= \"\": return \"?\"+res else: return res @fpbp.route('/') @fpbp.route('/overview') def overview(): return flask.render_template('overview.html',",
"SnT, University of Luxembourg Licensed under the Apache License, Version 2.0 (the \"License\");",
"you may not use this file except in compliance with the License. You",
"get_param_str()) @fpbp.route('/botnet') @fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>') def botnet(exp=None): return flask.render_template('botnet.html', exp=exp, parm=get_param_str()) @fpbp.route('/c2_server') @fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>')",
"return res @fpbp.route('/') @fpbp.route('/overview') def overview(): return flask.render_template('overview.html', parm = get_param_str()) @fpbp.route('/graph') @fpbp.route('/graph/')",
"\"\": return \"?\"+res else: return res @fpbp.route('/') @fpbp.route('/overview') def overview(): return flask.render_template('overview.html', parm",
"get_param_str()) @fpbp.route('/attack') @fpbp.route('/attack/') @fpbp.route('/attack/<exp>') def attack(exp=None): return flask.render_template('attack.html', exp=exp, parm = get_param_str()) @fpbp.route('/bot')",
"agreed to in writing, software distributed under the License is distributed on an",
"import flask import urllib fpbp = Blueprint('frontpage', __name__, template_folder=\"templ\") def get_param_str(): res =",
"fast_flux(exp=None): return flask.render_template('fast_flux.html', exp=exp, parm = get_param_str()) @fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>') def vulnerable_uri(exp=None): return",
"__name__, template_folder=\"templ\") def get_param_str(): res = urllib.parse.urlencode(request.args) if res != \"\": return \"?\"+res",
"@fpbp.route('/overview') def overview(): return flask.render_template('overview.html', parm = get_param_str()) @fpbp.route('/graph') @fpbp.route('/graph/') @fpbp.route('/graph/<exp>') def g1(exp=None):",
"distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES",
"You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by",
"exp=exp, parm = get_param_str()) @fpbp.route('/bot') @fpbp.route('/bot/') @fpbp.route('/bot/<exp>') def bot(exp=None): return flask.render_template('bot.html', exp=exp, parm",
"res != \"\": return \"?\"+res else: return res @fpbp.route('/') @fpbp.route('/overview') def overview(): return",
"return flask.render_template('c2_server.html', exp = exp, parm = get_param_str()) @fpbp.route('/malware') @fpbp.route('/malware/') @fpbp.route('/malware/<exp>') def malware(exp=None):",
"may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable",
"<reponame>tigran-a/ACDCStats #!/usr/bin/env python3 \"\"\" (c) Copyright 2015 <NAME>, SnT, University of Luxembourg Licensed",
"software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT",
"by applicable law or agreed to in writing, software distributed under the License",
"applicable law or agreed to in writing, software distributed under the License is",
"implied. See the License for the specific language governing permissions and limitations under",
"= get_param_str()) @fpbp.route('/malware') @fpbp.route('/malware/') @fpbp.route('/malware/<exp>') def malware(exp=None): return flask.render_template('malware.html', exp=exp, parm=get_param_str()) @fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/')",
"@fpbp.route('/tops/<exp>') def tops(exp=None): return flask.render_template('tops.html', exp=exp, parm = get_param_str()) @fpbp.route('/ddos') def ddos(): return",
"@fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>') def vulnerable_uri(exp=None): return flask.render_template('vulnerable_uri.html', exp=exp, parm = get_param_str()) @fpbp.route('/tops') @fpbp.route('/tops/') @fpbp.route('/tops/<exp>')",
"python3 \"\"\" (c) Copyright 2015 <NAME>, SnT, University of Luxembourg Licensed under the",
"http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed",
"if res != \"\": return \"?\"+res else: return res @fpbp.route('/') @fpbp.route('/overview') def overview():",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License",
"template_folder=\"templ\") def get_param_str(): res = urllib.parse.urlencode(request.args) if res != \"\": return \"?\"+res else:",
"License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF",
"def bot(exp=None): return flask.render_template('bot.html', exp=exp, parm = get_param_str()) @fpbp.route('/botnet') @fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>') def botnet(exp=None):",
"Blueprint, request import flask import urllib fpbp = Blueprint('frontpage', __name__, template_folder=\"templ\") def get_param_str():",
"def fast_flux(exp=None): return flask.render_template('fast_flux.html', exp=exp, parm = get_param_str()) @fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>') def vulnerable_uri(exp=None):",
"@fpbp.route('/attack/<exp>') def attack(exp=None): return flask.render_template('attack.html', exp=exp, parm = get_param_str()) @fpbp.route('/bot') @fpbp.route('/bot/') @fpbp.route('/bot/<exp>') def",
"language governing permissions and limitations under the License. \"\"\" from flask import Flask,",
"get_param_str()) @fpbp.route('/bot') @fpbp.route('/bot/') @fpbp.route('/bot/<exp>') def bot(exp=None): return flask.render_template('bot.html', exp=exp, parm = get_param_str()) @fpbp.route('/botnet')",
"parm = get_param_str()) @fpbp.route('/tops') @fpbp.route('/tops/') @fpbp.route('/tops/<exp>') def tops(exp=None): return flask.render_template('tops.html', exp=exp, parm =",
"= Blueprint('frontpage', __name__, template_folder=\"templ\") def get_param_str(): res = urllib.parse.urlencode(request.args) if res != \"\":",
"@fpbp.route('/vulnerable_uri/<exp>') def vulnerable_uri(exp=None): return flask.render_template('vulnerable_uri.html', exp=exp, parm = get_param_str()) @fpbp.route('/tops') @fpbp.route('/tops/') @fpbp.route('/tops/<exp>') def",
"fpbp = Blueprint('frontpage', __name__, template_folder=\"templ\") def get_param_str(): res = urllib.parse.urlencode(request.args) if res !=",
"@fpbp.route('/bot/<exp>') def bot(exp=None): return flask.render_template('bot.html', exp=exp, parm = get_param_str()) @fpbp.route('/botnet') @fpbp.route('/botnet/') @fpbp.route('/botnet/<exp>') def",
"law or agreed to in writing, software distributed under the License is distributed",
"#!/usr/bin/env python3 \"\"\" (c) Copyright 2015 <NAME>, SnT, University of Luxembourg Licensed under",
"IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See",
"@fpbp.route('/malware') @fpbp.route('/malware/') @fpbp.route('/malware/<exp>') def malware(exp=None): return flask.render_template('malware.html', exp=exp, parm=get_param_str()) @fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>') def",
"exp=exp, parm = get_param_str()) @fpbp.route('/attack') @fpbp.route('/attack/') @fpbp.route('/attack/<exp>') def attack(exp=None): return flask.render_template('attack.html', exp=exp, parm",
"Version 2.0 (the \"License\"); you may not use this file except in compliance",
"@fpbp.route('/attack/') @fpbp.route('/attack/<exp>') def attack(exp=None): return flask.render_template('attack.html', exp=exp, parm = get_param_str()) @fpbp.route('/bot') @fpbp.route('/bot/') @fpbp.route('/bot/<exp>')",
"in compliance with the License. You may obtain a copy of the License",
"parm = get_param_str()) @fpbp.route('/bot') @fpbp.route('/bot/') @fpbp.route('/bot/<exp>') def bot(exp=None): return flask.render_template('bot.html', exp=exp, parm =",
"@fpbp.route('/attack') @fpbp.route('/attack/') @fpbp.route('/attack/<exp>') def attack(exp=None): return flask.render_template('attack.html', exp=exp, parm = get_param_str()) @fpbp.route('/bot') @fpbp.route('/bot/')",
"exp=exp, parm=get_param_str()) @fpbp.route('/c2_server') @fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>') def c2_server(exp=None): return flask.render_template('c2_server.html', exp = exp, parm",
"@fpbp.route('/malware/<exp>') def malware(exp=None): return flask.render_template('malware.html', exp=exp, parm=get_param_str()) @fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>') def malicious_uri(exp=None): return",
"the Apache License, Version 2.0 (the \"License\"); you may not use this file",
"use this file except in compliance with the License. You may obtain a",
"(c) Copyright 2015 <NAME>, SnT, University of Luxembourg Licensed under the Apache License,",
"else: return res @fpbp.route('/') @fpbp.route('/overview') def overview(): return flask.render_template('overview.html', parm = get_param_str()) @fpbp.route('/graph')",
"KIND, either express or implied. See the License for the specific language governing",
"of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to",
"return flask.render_template('malware.html', exp=exp, parm=get_param_str()) @fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>') def malicious_uri(exp=None): return flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str())",
"exp=exp, parm=get_param_str()) @fpbp.route('/malicious_uri') @fpbp.route('/malicious_uri/') @fpbp.route('/malicious_uri/<exp>') def malicious_uri(exp=None): return flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str()) @fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/')",
"@fpbp.route('/fast_flux/<exp>') def fast_flux(exp=None): return flask.render_template('fast_flux.html', exp=exp, parm = get_param_str()) @fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>') def",
"Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use",
"parm = get_param_str()) @fpbp.route('/fast_flux') @fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>') def fast_flux(exp=None): return flask.render_template('fast_flux.html', exp=exp, parm =",
"flask.render_template('graphs.html', exp=exp, parm = get_param_str()) @fpbp.route('/attack') @fpbp.route('/attack/') @fpbp.route('/attack/<exp>') def attack(exp=None): return flask.render_template('attack.html', exp=exp,",
"def get_param_str(): res = urllib.parse.urlencode(request.args) if res != \"\": return \"?\"+res else: return",
"in writing, software distributed under the License is distributed on an \"AS IS\"",
"under the Apache License, Version 2.0 (the \"License\"); you may not use this",
"@fpbp.route('/malicious_uri/<exp>') def malicious_uri(exp=None): return flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str()) @fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>') def spam_campaign(exp=None): return",
"2015 <NAME>, SnT, University of Luxembourg Licensed under the Apache License, Version 2.0",
"@fpbp.route('/c2_server/<exp>') def c2_server(exp=None): return flask.render_template('c2_server.html', exp = exp, parm = get_param_str()) @fpbp.route('/malware') @fpbp.route('/malware/')",
"for the specific language governing permissions and limitations under the License. \"\"\" from",
"writing, software distributed under the License is distributed on an \"AS IS\" BASIS,",
"= urllib.parse.urlencode(request.args) if res != \"\": return \"?\"+res else: return res @fpbp.route('/') @fpbp.route('/overview')",
"a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or",
"res = urllib.parse.urlencode(request.args) if res != \"\": return \"?\"+res else: return res @fpbp.route('/')",
"either express or implied. See the License for the specific language governing permissions",
"@fpbp.route('/fast_flux') @fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>') def fast_flux(exp=None): return flask.render_template('fast_flux.html', exp=exp, parm = get_param_str()) @fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/')",
"exp=exp, parm = get_param_str()) @fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>') def vulnerable_uri(exp=None): return flask.render_template('vulnerable_uri.html', exp=exp, parm",
"flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str()) @fpbp.route('/spam_campaign') @fpbp.route('/spam_campaign/') @fpbp.route('/spam_campaign/<exp>') def spam_campaign(exp=None): return flask.render_template('spam_campaign.html', exp=exp, parm =",
"or agreed to in writing, software distributed under the License is distributed on",
"flask import urllib fpbp = Blueprint('frontpage', __name__, template_folder=\"templ\") def get_param_str(): res = urllib.parse.urlencode(request.args)",
"attack(exp=None): return flask.render_template('attack.html', exp=exp, parm = get_param_str()) @fpbp.route('/bot') @fpbp.route('/bot/') @fpbp.route('/bot/<exp>') def bot(exp=None): return",
"parm = get_param_str()) @fpbp.route('/attack') @fpbp.route('/attack/') @fpbp.route('/attack/<exp>') def attack(exp=None): return flask.render_template('attack.html', exp=exp, parm =",
"def c2_server(exp=None): return flask.render_template('c2_server.html', exp = exp, parm = get_param_str()) @fpbp.route('/malware') @fpbp.route('/malware/') @fpbp.route('/malware/<exp>')",
"flask.render_template('botnet.html', exp=exp, parm=get_param_str()) @fpbp.route('/c2_server') @fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>') def c2_server(exp=None): return flask.render_template('c2_server.html', exp = exp,",
"exp=exp, parm = get_param_str()) @fpbp.route('/fast_flux') @fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>') def fast_flux(exp=None): return flask.render_template('fast_flux.html', exp=exp, parm",
"Apache License, Version 2.0 (the \"License\"); you may not use this file except",
"or implied. See the License for the specific language governing permissions and limitations",
"return flask.render_template('spam_campaign.html', exp=exp, parm = get_param_str()) @fpbp.route('/fast_flux') @fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>') def fast_flux(exp=None): return flask.render_template('fast_flux.html',",
"with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0",
"return flask.render_template('fast_flux.html', exp=exp, parm = get_param_str()) @fpbp.route('/vulnerable_uri') @fpbp.route('/vulnerable_uri/') @fpbp.route('/vulnerable_uri/<exp>') def vulnerable_uri(exp=None): return flask.render_template('vulnerable_uri.html',",
"required by applicable law or agreed to in writing, software distributed under the",
"at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software",
"urllib.parse.urlencode(request.args) if res != \"\": return \"?\"+res else: return res @fpbp.route('/') @fpbp.route('/overview') def",
"@fpbp.route('/c2_server/') @fpbp.route('/c2_server/<exp>') def c2_server(exp=None): return flask.render_template('c2_server.html', exp = exp, parm = get_param_str()) @fpbp.route('/malware')",
"@fpbp.route('/spam_campaign/<exp>') def spam_campaign(exp=None): return flask.render_template('spam_campaign.html', exp=exp, parm = get_param_str()) @fpbp.route('/fast_flux') @fpbp.route('/fast_flux/') @fpbp.route('/fast_flux/<exp>') def"
] |
[
"self.missing_field, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'start_highlight_position is required') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_get_all_comments(self): \"\"\"Test",
"{ \"username\": \"andela\", \"email\": \"<EMAIL>\", \"password\": \"<PASSWORD>\" }} self.create_article_data = { \"title\": \"Programming",
"= { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": \"one\", \"end_highlight_position\": 15 }}",
"= { \"comment\": { \"body\": \"Nice Idea\" }} def register_user(self, user_details): \"\"\"Sign up",
"= response.data['slug'] # update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token",
"slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertIn('selected_text', response.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_rejects_start_index_larger_than_end_index(self): \"\"\"Test rejects",
"= json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_unexisting_comments(self): \"\"\"Test update unexisting comment.\"\"\" token",
"response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertIn('selected_text', response.data) self.assertEqual(response.status_code,",
"\"comment\": { \"body\": \"Nice Idea\" }} def register_user(self, user_details): \"\"\"Sign up a new",
"variety of programming languagr\", \"description\": \"Programming\", \"tagList\": [\"Programming\", \"language\", \"python\"] } self.highlighted_text =",
"highlighted text.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug':",
"self.client = APIClient() self.signup_url = reverse('authentication:auth-register') self.create_article_url = reverse('articles:articles-listcreate') self.user_two_details = { \"user\":",
"self.assertEqual(response.data['error'], 'Start of highlight and end of highlight' ' indices should be both",
"\"Good work here!!\", \"start_highlight_position\": 2, \"end_highlight_position\": 15 }} self.selection_start_index_larger_than_end_index = { \"comment\": {",
"kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get all comments response = self.client.get(",
"reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # delete single comment article_id =",
"register.data[\"token\"] return token def create_article(self, token): \"\"\"Create an article.\"\"\" response = self.client.post( self.create_article_url,",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get all comments response = self.client.get( reverse('articles:high_light', kwargs={'slug': slug}),",
"= self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.invalid_index_datatype, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'Start of highlight",
"slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token {}'.format(token), format='json')",
"# update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, article_id), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json')",
"{}'.format(token)) slug = response.data['slug'] # highlight a text and comment it response =",
"= self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token",
"highlighted text deleted successfully') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_single_comments(self): \"\"\"Test update single comment.\"\"\" token",
"\"body\": \"Good work here!!\", \"end_highlight_position\": 15 }} self.update_comment = { \"comment\": { \"body\":",
"response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_delete_single_comments(self): \"\"\"Test delete single comments.\"\"\" token = self.register_user(self.user_two_details) #",
"response.data['slug'] # update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token),",
"\"\"\"Sign up a new user to get a token\"\"\" register = self.client.post(self.signup_url, user_details,",
"= self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token",
"self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # update the comment response =",
"from rest_framework.views import status from rest_framework.test import APITestCase, APIClient class CommentsTestCase(APITestCase): def setUp(self):",
"test_update_unexisting_comments(self): \"\"\"Test update unexisting comment.\"\"\" token = self.register_user(self.user_two_details) # create an article response",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data[\"error\"], \"The comment does not exist\") self.assertEqual(response.status_code,",
"# get all comments response = self.client.get( reverse('articles:high_light', kwargs={'slug': slug}), format='json') response_data =",
"response.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_rejects_start_index_larger_than_end_index(self): \"\"\"Test rejects start index larger than end index.\"\"\"",
"import status from rest_framework.test import APITestCase, APIClient class CommentsTestCase(APITestCase): def setUp(self): self.client =",
"= self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.invalid_index_datatype, HTTP_AUTHORIZATION='token",
"register = self.client.post(self.signup_url, user_details, format='json') token = register.data[\"token\"] return token def create_article(self, token):",
"format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data[0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_single_comments(self): \"\"\"Test get single",
"response_data[0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_single_comments(self): \"\"\"Test get single comments.\"\"\" token = self.register_user(self.user_two_details) #",
"self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_get_delete_unexisting_comments(self): \"\"\"Delete unexisting comment\"\"\" token = self.register_user(self.user_two_details) # create an",
"should not ' 'be greater or equal end_index_position') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test",
"{}'.format(token), format='json') # delete single comment article_id = response.data['id'] response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug,",
"self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_delete_single_comments(self): \"\"\"Test delete single comments.\"\"\" token = self.register_user(self.user_two_details)",
"self.client.post(self.signup_url, user_details, format='json') token = register.data[\"token\"] return token def create_article(self, token): \"\"\"Create an",
"15 }} self.update_comment = { \"comment\": { \"body\": \"Nice Idea\" }} def register_user(self,",
"Languages\", \"body\": \"There are variety of programming languagr\", \"description\": \"Programming\", \"tagList\": [\"Programming\", \"language\",",
"token\"\"\" register = self.client.post(self.signup_url, user_details, format='json') token = register.data[\"token\"] return token def create_article(self,",
"self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY) def test_rejects_missing_required_field(self): \"\"\"Test for missing field.\"\"\" token = self.register_user(self.user_two_details) slug =",
"response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # update",
"\"title\": \"Programming Languages\", \"body\": \"There are variety of programming languagr\", \"description\": \"Programming\", \"tagList\":",
"format='json') self.assertEqual(response.data['error'], 'start_highlight_position is required') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_get_all_comments(self): \"\"\"Test get all comments.\"\"\"",
"response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'],",
"self.highlighted_text = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 2, \"end_highlight_position\": 15",
"rest_framework.test import APITestCase, APIClient class CommentsTestCase(APITestCase): def setUp(self): self.client = APIClient() self.signup_url =",
"= { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 28, \"end_highlight_position\": 15 }}",
"# get single comment article_id = response.data['id'] response = self.client.get( '/api/articles/{}/highlight/{}'.format(slug, article_id), format='json')",
"response.data['id'] # update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, article_id), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token),",
"self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_get_all_comments(self): \"\"\"Test get all comments.\"\"\" token = self.register_user(self.user_two_details) # create",
"response.data['slug'] # highlight a text and comment it self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text,",
"def test_get_all_comments(self): \"\"\"Test get all comments.\"\"\" token = self.register_user(self.user_two_details) # create an article",
"test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test rejects index data type that are not integers.\"\"\" token = self.register_user(self.user_two_details)",
"self.register_user(self.user_two_details) # create an article response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token))",
"# update the comment response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json')",
"\"body\": \"There are variety of programming languagr\", \"description\": \"Programming\", \"tagList\": [\"Programming\", \"language\", \"python\"]",
"= self.client.put( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'], 'The",
"'/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'], 'The comment does",
"single comment article_id = response.data['id'] response = self.client.get( '/api/articles/{}/highlight/{}'.format(slug, article_id), format='json') response_data =",
"comment article_id = response.data['id'] response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, article_id), HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data",
"\"\"\"Create an article.\"\"\" response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug =",
"of highlight and end of highlight' ' indices should be both integers') self.assertEqual(response.status_code,",
"slug = response.data['slug'] # highlight a text and comment on it response =",
"self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_unexisting_comments(self):",
"format='json') # delete single comment article_id = response.data['id'] response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, article_id),",
"self.assertEqual(response.data['error'], 'start_highlight_position is required') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_get_all_comments(self): \"\"\"Test get all comments.\"\"\" token",
"= reverse('authentication:auth-register') self.create_article_url = reverse('articles:articles-listcreate') self.user_two_details = { \"user\": { \"username\": \"andela\", \"email\":",
"single comment article_id = response.data['id'] response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, article_id), HTTP_AUTHORIZATION='token {}'.format(token), format='json')",
"self.client.put( '/api/articles/{}/highlight/{}'.format(slug, article_id), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code,",
"self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertIn('selected_text', response.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def",
"article.\"\"\" response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] return",
"reverse('articles:high_light', kwargs={'slug': slug}), self.missing_field, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'start_highlight_position is required') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"test_rejects_start_index_larger_than_end_index(self): \"\"\"Test rejects start index larger than end index.\"\"\" token = self.register_user(self.user_two_details) slug",
"self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data[\"error\"], \"The comment does not exist\")",
"not exist') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_get_delete_unexisting_comments(self): \"\"\"Delete unexisting comment\"\"\" token = self.register_user(self.user_two_details) #",
"{}'.format(token), format='json') self.assertEqual(response.data['error'], 'Start of highlight and end of highlight' ' indices should",
"{ \"body\": \"Good work here!!\", \"start_highlight_position\": \"one\", \"end_highlight_position\": 15 }} self.missing_field = {",
"}} def register_user(self, user_details): \"\"\"Sign up a new user to get a token\"\"\"",
"response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_unexisting_comments(self): \"\"\"Test update unexisting comment.\"\"\" token = self.register_user(self.user_two_details) #",
"user to get a token\"\"\" register = self.client.post(self.signup_url, user_details, format='json') token = register.data[\"token\"]",
"get all comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post(",
"response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data[\"error\"],",
"\"Programming Languages\", \"body\": \"There are variety of programming languagr\", \"description\": \"Programming\", \"tagList\": [\"Programming\",",
"import reverse from rest_framework.views import status from rest_framework.test import APITestCase, APIClient class CommentsTestCase(APITestCase):",
"user_details, format='json') token = register.data[\"token\"] return token def create_article(self, token): \"\"\"Create an article.\"\"\"",
"rest_framework.views import status from rest_framework.test import APITestCase, APIClient class CommentsTestCase(APITestCase): def setUp(self): self.client",
"work here!!\", \"start_highlight_position\": \"one\", \"end_highlight_position\": 15 }} self.missing_field = { \"comment\": { \"body\":",
"= self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get single comment",
"format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'], 'The comment does not exist') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def",
"2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data[\"error\"], \"The comment does not",
"slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json')",
"slug = response.data['slug'] # highlight a text and comment it self.client.post( reverse('articles:high_light', kwargs={'slug':",
"[\"Programming\", \"language\", \"python\"] } self.highlighted_text = { \"comment\": { \"body\": \"Good work here!!\",",
"response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'], 'The comment does not exist') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_get_delete_unexisting_comments(self):",
"= { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 2, \"end_highlight_position\": 15 }}",
"self.assertEqual(response.data['error'], 'The comment does not exist') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_get_delete_unexisting_comments(self): \"\"\"Delete unexisting comment\"\"\"",
"self.missing_field = { \"comment\": { \"body\": \"Good work here!!\", \"end_highlight_position\": 15 }} self.update_comment",
"a new user to get a token\"\"\" register = self.client.post(self.signup_url, user_details, format='json') token",
"= response.data['id'] response = self.client.get( '/api/articles/{}/highlight/{}'.format(slug, article_id), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data)",
"= self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] return slug def",
"index larger than end index.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response =",
"self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] return slug def test_comment_highlighted_text(self):",
"unexisting comment.\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post( self.create_article_url,",
"an article.\"\"\" response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug']",
"on highlighted text deleted successfully') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_single_comments(self): \"\"\"Test update single comment.\"\"\"",
"import json from django.urls import reverse from rest_framework.views import status from rest_framework.test import",
"{ \"body\": \"Good work here!!\", \"start_highlight_position\": 28, \"end_highlight_position\": 15 }} self.invalid_index_datatype = {",
"response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # delete single",
"up a new user to get a token\"\"\" register = self.client.post(self.signup_url, user_details, format='json')",
"self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'The start_index_position should not ' 'be greater or",
"of programming languagr\", \"description\": \"Programming\", \"tagList\": [\"Programming\", \"language\", \"python\"] } self.highlighted_text = {",
"token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.missing_field,",
"= self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertIn('selected_text', response.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"index data type that are not integers.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token)",
"response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, article_id), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text',",
"class CommentsTestCase(APITestCase): def setUp(self): self.client = APIClient() self.signup_url = reverse('authentication:auth-register') self.create_article_url = reverse('articles:articles-listcreate')",
"reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get single comment article_id =",
"APIClient() self.signup_url = reverse('authentication:auth-register') self.create_article_url = reverse('articles:articles-listcreate') self.user_two_details = { \"user\": { \"username\":",
"def setUp(self): self.client = APIClient() self.signup_url = reverse('authentication:auth-register') self.create_article_url = reverse('articles:articles-listcreate') self.user_two_details =",
"from django.urls import reverse from rest_framework.views import status from rest_framework.test import APITestCase, APIClient",
"{ \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": \"one\", \"end_highlight_position\": 15 }} self.missing_field",
"{ \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 2, \"end_highlight_position\": 15 }} self.selection_start_index_larger_than_end_index",
"self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'The start_index_position should not",
"slug = response.data['slug'] # update the comment response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment,",
"article_id), HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'], 'Comment on highlighted text deleted",
"status from rest_framework.test import APITestCase, APIClient class CommentsTestCase(APITestCase): def setUp(self): self.client = APIClient()",
"comment it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') #",
"article_id = response.data['id'] response = self.client.get( '/api/articles/{}/highlight/{}'.format(slug, article_id), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text',",
"response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, article_id), HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'], 'Comment",
"the comment response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data =",
"response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] return slug",
"= self.client.get( reverse('articles:high_light', kwargs={'slug': slug}), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data[0]) self.assertEqual(response.status_code, status.HTTP_200_OK)",
"end index.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug':",
"indices should be both integers') self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY) def test_rejects_missing_required_field(self): \"\"\"Test for missing field.\"\"\"",
"reverse('articles:articles-listcreate') self.user_two_details = { \"user\": { \"username\": \"andela\", \"email\": \"<EMAIL>\", \"password\": \"<PASSWORD>\" }}",
"self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_single_comments(self): \"\"\"Test update single comment.\"\"\" token = self.register_user(self.user_two_details) # create",
"slug}), self.missing_field, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'start_highlight_position is required') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_get_all_comments(self):",
"\"username\": \"andela\", \"email\": \"<EMAIL>\", \"password\": \"<PASSWORD>\" }} self.create_article_data = { \"title\": \"Programming Languages\",",
"field.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}),",
"comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, article_id), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data))",
"'/api/articles/{}/highlight/{}'.format(slug, article_id), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK)",
"\"end_highlight_position\": 15 }} self.missing_field = { \"comment\": { \"body\": \"Good work here!!\", \"end_highlight_position\":",
"\"\"\"Test for missing field.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post(",
"\"user\": { \"username\": \"andela\", \"email\": \"<EMAIL>\", \"password\": \"<PASSWORD>\" }} self.create_article_data = { \"title\":",
"kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertIn('selected_text', response.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_rejects_start_index_larger_than_end_index(self): \"\"\"Test",
"token = self.register_user(self.user_two_details) # create an article response = self.client.post( self.create_article_url, self.create_article_data, format='json',",
"{}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'], 'Comment on highlighted text deleted successfully') self.assertEqual(response.status_code,",
"user_details): \"\"\"Sign up a new user to get a token\"\"\" register = self.client.post(self.signup_url,",
"HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # highlight a text and comment it response",
"data type that are not integers.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response",
"\"description\": \"Programming\", \"tagList\": [\"Programming\", \"language\", \"python\"] } self.highlighted_text = { \"comment\": { \"body\":",
"slug}), self.invalid_index_datatype, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'Start of highlight and end of highlight'",
"{}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data[\"error\"], \"The comment does not exist\") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"\"one\", \"end_highlight_position\": 15 }} self.missing_field = { \"comment\": { \"body\": \"Good work here!!\",",
"APIClient class CommentsTestCase(APITestCase): def setUp(self): self.client = APIClient() self.signup_url = reverse('authentication:auth-register') self.create_article_url =",
"slug}), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data[0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_single_comments(self): \"\"\"Test get",
"status.HTTP_404_NOT_FOUND) def test_get_delete_unexisting_comments(self): \"\"\"Delete unexisting comment\"\"\" token = self.register_user(self.user_two_details) # create an article",
"languagr\", \"description\": \"Programming\", \"tagList\": [\"Programming\", \"language\", \"python\"] } self.highlighted_text = { \"comment\": {",
"self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token),",
"self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.missing_field, HTTP_AUTHORIZATION='token {}'.format(token),",
"self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # delete single comment article_id = response.data['id'] response =",
"new user to get a token\"\"\" register = self.client.post(self.signup_url, user_details, format='json') token =",
"kwargs={'slug': slug}), self.missing_field, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'start_highlight_position is required') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def",
"required') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_get_all_comments(self): \"\"\"Test get all comments.\"\"\" token = self.register_user(self.user_two_details) #",
"def test_get_single_comments(self): \"\"\"Test get single comments.\"\"\" token = self.register_user(self.user_two_details) # create an article",
"missing field.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug':",
"self.client.get( reverse('articles:high_light', kwargs={'slug': slug}), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data[0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def",
"comment.\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post( self.create_article_url, self.create_article_data,",
"'/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data[\"error\"], \"The comment does",
"kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # delete single comment article_id = response.data['id']",
"self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # update the comment response",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'start_highlight_position is required') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_get_all_comments(self): \"\"\"Test get",
"response = self.client.get( reverse('articles:high_light', kwargs={'slug': slug}), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data[0]) self.assertEqual(response.status_code,",
"'start_highlight_position is required') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_get_all_comments(self): \"\"\"Test get all comments.\"\"\" token =",
"a text and comment on it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text,",
"response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # highlight",
"def test_rejects_missing_required_field(self): \"\"\"Test for missing field.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response",
"= self.client.put( '/api/articles/{}/highlight/{}'.format(slug, article_id), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data)",
"for missing field.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light',",
"highlight and end of highlight' ' indices should be both integers') self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)",
"{}'.format(token)) slug = response.data['slug'] # update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, 2),",
"return token def create_article(self, token): \"\"\"Create an article.\"\"\" response = self.client.post( self.create_article_url, self.create_article_data,",
"def test_rejects_start_index_larger_than_end_index(self): \"\"\"Test rejects start index larger than end index.\"\"\" token = self.register_user(self.user_two_details)",
"slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get single comment article_id = response.data['id'] response",
"HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] return slug def test_comment_highlighted_text(self): \"\"\"Test comment highlighted text.\"\"\"",
"= self.client.get( '/api/articles/{}/highlight/{}'.format(slug, article_id), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def",
"self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # highlight a text and",
"text and comment on it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token",
"= response.data['id'] response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, article_id), HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data))",
"self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] return slug def test_comment_highlighted_text(self): \"\"\"Test comment",
"index.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}),",
"}} self.invalid_index_datatype = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": \"one\", \"end_highlight_position\":",
"larger than end index.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post(",
"article_id), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def",
"get a token\"\"\" register = self.client.post(self.signup_url, user_details, format='json') token = register.data[\"token\"] return token",
"here!!\", \"start_highlight_position\": \"one\", \"end_highlight_position\": 15 }} self.missing_field = { \"comment\": { \"body\": \"Good",
"response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'The start_index_position",
"self.assertEqual(response.data['message'], 'Comment on highlighted text deleted successfully') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_single_comments(self): \"\"\"Test update",
"self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.missing_field, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'start_highlight_position",
"self.update_comment = { \"comment\": { \"body\": \"Nice Idea\" }} def register_user(self, user_details): \"\"\"Sign",
"CommentsTestCase(APITestCase): def setUp(self): self.client = APIClient() self.signup_url = reverse('authentication:auth-register') self.create_article_url = reverse('articles:articles-listcreate') self.user_two_details",
"self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.missing_field, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'start_highlight_position is required') self.assertEqual(response.status_code,",
"\"Programming\", \"tagList\": [\"Programming\", \"language\", \"python\"] } self.highlighted_text = { \"comment\": { \"body\": \"Good",
"{}'.format(token)) slug = response.data['slug'] # highlight a text and comment on it response",
"'/api/articles/{}/highlight/{}'.format(slug, article_id), HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'], 'Comment on highlighted text",
"slug = response.data['slug'] return slug def test_comment_highlighted_text(self): \"\"\"Test comment highlighted text.\"\"\" token =",
"\"email\": \"<EMAIL>\", \"password\": \"<PASSWORD>\" }} self.create_article_data = { \"title\": \"Programming Languages\", \"body\": \"There",
"'be greater or equal end_index_position') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test rejects index data",
"APITestCase, APIClient class CommentsTestCase(APITestCase): def setUp(self): self.client = APIClient() self.signup_url = reverse('authentication:auth-register') self.create_article_url",
"update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, article_id), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data",
"and comment on it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token),",
"\"comment\": { \"body\": \"Good work here!!\", \"end_highlight_position\": 15 }} self.update_comment = { \"comment\":",
"self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, article_id), HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'], 'Comment on highlighted",
"reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertIn('selected_text', response.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_rejects_start_index_larger_than_end_index(self):",
"format='json') self.assertEqual(response.data['error'], 'Start of highlight and end of highlight' ' indices should be",
"# update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json')",
"self.assertIn('selected_text', response.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_rejects_start_index_larger_than_end_index(self): \"\"\"Test rejects start index larger than end",
"does not exist') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_get_delete_unexisting_comments(self): \"\"\"Delete unexisting comment\"\"\" token = self.register_user(self.user_two_details)",
"self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'The",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertIn('selected_text', response.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_rejects_start_index_larger_than_end_index(self): \"\"\"Test rejects start index",
"single comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post( self.create_article_url,",
"reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get all comments response =",
"status.HTTP_200_OK) def test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test rejects index data type that are not integers.\"\"\" token",
"status.HTTP_200_OK) def test_delete_single_comments(self): \"\"\"Test delete single comments.\"\"\" token = self.register_user(self.user_two_details) # create an",
"slug = response.data['slug'] # update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment,",
"should be both integers') self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY) def test_rejects_missing_required_field(self): \"\"\"Test for missing field.\"\"\" token",
"{}'.format(token), format='json') # get all comments response = self.client.get( reverse('articles:high_light', kwargs={'slug': slug}), format='json')",
"self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.invalid_index_datatype, HTTP_AUTHORIZATION='token {}'.format(token),",
"{}'.format(token)) slug = response.data['slug'] # update the comment response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, 2),",
"response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get single",
"rejects start index larger than end index.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token)",
"# create an article response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug",
"end of highlight' ' indices should be both integers') self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY) def test_rejects_missing_required_field(self):",
"def test_get_delete_unexisting_comments(self): \"\"\"Delete unexisting comment\"\"\" token = self.register_user(self.user_two_details) # create an article response",
"self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_unexisting_comments(self): \"\"\"Test update unexisting comment.\"\"\" token = self.register_user(self.user_two_details) # create",
"or equal end_index_position') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test rejects index data type that",
"json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_delete_single_comments(self): \"\"\"Test delete single comments.\"\"\" token =",
"self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get single comment article_id = response.data['id'] response =",
"update single comment.\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post(",
"\"python\"] } self.highlighted_text = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 2,",
"a text and comment it self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json')",
"2, \"end_highlight_position\": 15 }} self.selection_start_index_larger_than_end_index = { \"comment\": { \"body\": \"Good work here!!\",",
"= { \"user\": { \"username\": \"andela\", \"email\": \"<EMAIL>\", \"password\": \"<PASSWORD>\" }} self.create_article_data =",
"def test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test rejects index data type that are not integers.\"\"\" token =",
"{}'.format(token)) slug = response.data['slug'] # highlight a text and comment it self.client.post( reverse('articles:high_light',",
"update unexisting comment.\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post(",
"def register_user(self, user_details): \"\"\"Sign up a new user to get a token\"\"\" register",
"slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.missing_field, HTTP_AUTHORIZATION='token {}'.format(token), format='json')",
"\"<EMAIL>\", \"password\": \"<PASSWORD>\" }} self.create_article_data = { \"title\": \"Programming Languages\", \"body\": \"There are",
"= self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # delete single comment",
"= response.data['slug'] # update the comment response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token",
"{ \"body\": \"Good work here!!\", \"start_highlight_position\": 2, \"end_highlight_position\": 15 }} self.selection_start_index_larger_than_end_index = {",
"test_rejects_missing_required_field(self): \"\"\"Test for missing field.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response =",
"HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug,",
"# highlight a text and comment it self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token",
"programming languagr\", \"description\": \"Programming\", \"tagList\": [\"Programming\", \"language\", \"python\"] } self.highlighted_text = { \"comment\":",
"slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.invalid_index_datatype, HTTP_AUTHORIZATION='token {}'.format(token), format='json')",
"response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.missing_field, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'start_highlight_position is",
"= APIClient() self.signup_url = reverse('authentication:auth-register') self.create_article_url = reverse('articles:articles-listcreate') self.user_two_details = { \"user\": {",
"create_article(self, token): \"\"\"Create an article.\"\"\" response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token))",
"response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data[0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_single_comments(self): \"\"\"Test get single comments.\"\"\"",
"be both integers') self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY) def test_rejects_missing_required_field(self): \"\"\"Test for missing field.\"\"\" token =",
"self.create_article_url = reverse('articles:articles-listcreate') self.user_two_details = { \"user\": { \"username\": \"andela\", \"email\": \"<EMAIL>\", \"password\":",
"\"start_highlight_position\": 2, \"end_highlight_position\": 15 }} self.selection_start_index_larger_than_end_index = { \"comment\": { \"body\": \"Good work",
"= self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') article_id = response.data['id'] #",
"self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') article_id = response.data['id'] # update the comment response =",
"greater or equal end_index_position') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test rejects index data type",
"reverse('articles:high_light', kwargs={'slug': slug}), self.invalid_index_datatype, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'Start of highlight and end",
"highlight' ' indices should be both integers') self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY) def test_rejects_missing_required_field(self): \"\"\"Test for",
"HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # highlight a text and comment on it",
"work here!!\", \"end_highlight_position\": 15 }} self.update_comment = { \"comment\": { \"body\": \"Nice Idea\"",
"comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post( self.create_article_url, self.create_article_data,",
"= self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, article_id), HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'], 'Comment on",
"= self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'The start_index_position should",
"the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data =",
"self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get single comment article_id",
"response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') article_id = response.data['id']",
"response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_unexisting_comments(self): \"\"\"Test update unexisting comment.\"\"\"",
"= reverse('articles:articles-listcreate') self.user_two_details = { \"user\": { \"username\": \"andela\", \"email\": \"<EMAIL>\", \"password\": \"<PASSWORD>\"",
"\"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": \"one\", \"end_highlight_position\": 15 }} self.missing_field =",
"self.selection_start_index_larger_than_end_index = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 28, \"end_highlight_position\": 15",
"token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text,",
"self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data[\"error\"], \"The comment",
"comment does not exist') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_get_delete_unexisting_comments(self): \"\"\"Delete unexisting comment\"\"\" token =",
"text deleted successfully') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_single_comments(self): \"\"\"Test update single comment.\"\"\" token =",
"2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'], 'The comment does not",
"self.assertEqual(response.status_code, status.HTTP_200_OK) def test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test rejects index data type that are not integers.\"\"\"",
"exist') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_get_delete_unexisting_comments(self): \"\"\"Delete unexisting comment\"\"\" token = self.register_user(self.user_two_details) # create",
"{}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_unexisting_comments(self): \"\"\"Test update",
"= response.data['slug'] # highlight a text and comment it self.client.post( reverse('articles:high_light', kwargs={'slug': slug}),",
"'Comment on highlighted text deleted successfully') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_single_comments(self): \"\"\"Test update single",
"{ \"user\": { \"username\": \"andela\", \"email\": \"<EMAIL>\", \"password\": \"<PASSWORD>\" }} self.create_article_data = {",
"# highlight a text and comment on it response = self.client.post( reverse('articles:high_light', kwargs={'slug':",
"response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'], 'Comment on highlighted text deleted successfully') self.assertEqual(response.status_code, status.HTTP_200_OK) def",
"\"\"\"Delete unexisting comment\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post(",
"setUp(self): self.client = APIClient() self.signup_url = reverse('authentication:auth-register') self.create_article_url = reverse('articles:articles-listcreate') self.user_two_details = {",
"self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.invalid_index_datatype, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'Start of highlight and",
"\"body\": \"Nice Idea\" }} def register_user(self, user_details): \"\"\"Sign up a new user to",
"all comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post( self.create_article_url,",
"kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') article_id = response.data['id'] # update the comment",
"test_delete_single_comments(self): \"\"\"Test delete single comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response",
"text and comment it self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') #",
"}} self.missing_field = { \"comment\": { \"body\": \"Good work here!!\", \"end_highlight_position\": 15 }}",
"self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertIn('selected_text', response.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_rejects_start_index_larger_than_end_index(self): \"\"\"Test rejects start",
"response.data['slug'] # highlight a text and comment on it response = self.client.post( reverse('articles:high_light',",
"def test_update_single_comments(self): \"\"\"Test update single comment.\"\"\" token = self.register_user(self.user_two_details) # create an article",
"'The start_index_position should not ' 'be greater or equal end_index_position') self.assertEqual(response.status_code, status.HTTP_200_OK) def",
"= self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # update the",
"format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # update the comment response = self.client.delete(",
"highlight a text and comment on it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}),",
"# delete single comment article_id = response.data['id'] response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, article_id), HTTP_AUTHORIZATION='token",
"\"body\": \"Good work here!!\", \"start_highlight_position\": 2, \"end_highlight_position\": 15 }} self.selection_start_index_larger_than_end_index = { \"comment\":",
"{}'.format(token), format='json') self.assertEqual(response.data['error'], 'The start_index_position should not ' 'be greater or equal end_index_position')",
"\"body\": \"Good work here!!\", \"start_highlight_position\": \"one\", \"end_highlight_position\": 15 }} self.missing_field = { \"comment\":",
"single comment.\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post( self.create_article_url,",
"create an article response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug =",
"'/api/articles/{}/highlight/{}'.format(slug, article_id), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_delete_single_comments(self): \"\"\"Test",
"and end of highlight' ' indices should be both integers') self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY) def",
"article_id), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_delete_single_comments(self): \"\"\"Test delete",
"unexisting comment\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post( self.create_article_url,",
"def test_delete_single_comments(self): \"\"\"Test delete single comments.\"\"\" token = self.register_user(self.user_two_details) # create an article",
"article_id = response.data['id'] response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, article_id), HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data =",
"token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.invalid_index_datatype,",
"self.client.put( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'], 'The comment",
"\"<PASSWORD>\" }} self.create_article_data = { \"title\": \"Programming Languages\", \"body\": \"There are variety of",
"self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_rejects_start_index_larger_than_end_index(self): \"\"\"Test rejects start index larger than end index.\"\"\" token",
"reverse from rest_framework.views import status from rest_framework.test import APITestCase, APIClient class CommentsTestCase(APITestCase): def",
"comment on it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json')",
"integers.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}),",
"{ \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 28, \"end_highlight_position\": 15 }} self.invalid_index_datatype",
"28, \"end_highlight_position\": 15 }} self.invalid_index_datatype = { \"comment\": { \"body\": \"Good work here!!\",",
"= self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'],",
"work here!!\", \"start_highlight_position\": 28, \"end_highlight_position\": 15 }} self.invalid_index_datatype = { \"comment\": { \"body\":",
"{ \"body\": \"Nice Idea\" }} def register_user(self, user_details): \"\"\"Sign up a new user",
"def create_article(self, token): \"\"\"Create an article.\"\"\" response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token",
"= json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'], 'Comment on highlighted text deleted successfully') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_single_comments(self):",
"get all comments response = self.client.get( reverse('articles:high_light', kwargs={'slug': slug}), format='json') response_data = json.loads(json.dumps(response.data))",
"on it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') article_id",
"self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token {}'.format(token),",
"= self.register_user(self.user_two_details) # create an article response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') # delete single comment article_id = response.data['id'] response = self.client.delete(",
"here!!\", \"start_highlight_position\": 2, \"end_highlight_position\": 15 }} self.selection_start_index_larger_than_end_index = { \"comment\": { \"body\": \"Good",
"deleted successfully') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_single_comments(self): \"\"\"Test update single comment.\"\"\" token = self.register_user(self.user_two_details)",
"reverse('articles:high_light', kwargs={'slug': slug}), self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'The start_index_position should not '",
"test_get_delete_unexisting_comments(self): \"\"\"Delete unexisting comment\"\"\" token = self.register_user(self.user_two_details) # create an article response =",
"{}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'], 'The comment does not exist') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"= { \"comment\": { \"body\": \"Good work here!!\", \"end_highlight_position\": 15 }} self.update_comment =",
"{ \"body\": \"Good work here!!\", \"end_highlight_position\": 15 }} self.update_comment = { \"comment\": {",
"comment response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data))",
"= register.data[\"token\"] return token def create_article(self, token): \"\"\"Create an article.\"\"\" response = self.client.post(",
"comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data))",
"response.data['slug'] # highlight a text and comment it response = self.client.post( reverse('articles:high_light', kwargs={'slug':",
"are not integers.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light',",
"not ' 'be greater or equal end_index_position') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test rejects",
"self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # highlight a text",
"format='json') article_id = response.data['id'] # update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, article_id),",
"format='json') self.assertIn('selected_text', response.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_rejects_start_index_larger_than_end_index(self): \"\"\"Test rejects start index larger than",
"self.assertEqual(response.data['error'], 'The start_index_position should not ' 'be greater or equal end_index_position') self.assertEqual(response.status_code, status.HTTP_200_OK)",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'], 'Comment on highlighted text deleted successfully')",
"self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertIn('selected_text', response.data)",
"comment highlighted text.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light',",
"\"\"\"Test get single comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response =",
"format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] return slug def test_comment_highlighted_text(self): \"\"\"Test comment highlighted",
"format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_delete_single_comments(self): \"\"\"Test delete single",
"comment\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post( self.create_article_url, self.create_article_data,",
"equal end_index_position') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test rejects index data type that are",
"\"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 2, \"end_highlight_position\": 15 }} self.selection_start_index_larger_than_end_index =",
"# highlight a text and comment it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}),",
"\"Good work here!!\", \"start_highlight_position\": \"one\", \"end_highlight_position\": 15 }} self.missing_field = { \"comment\": {",
"and comment it self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get",
"{}'.format(token), format='json') self.assertEqual(response.data['error'], 'start_highlight_position is required') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_get_all_comments(self): \"\"\"Test get all",
"highlight a text and comment it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text,",
"'Start of highlight and end of highlight' ' indices should be both integers')",
"get single comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post(",
"\"andela\", \"email\": \"<EMAIL>\", \"password\": \"<PASSWORD>\" }} self.create_article_data = { \"title\": \"Programming Languages\", \"body\":",
"self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.invalid_index_datatype, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'Start",
"\"\"\"Test comment highlighted text.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post(",
"\"\"\"Test rejects index data type that are not integers.\"\"\" token = self.register_user(self.user_two_details) slug",
"highlight a text and comment it self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token),",
"= json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_delete_single_comments(self): \"\"\"Test delete single comments.\"\"\" token",
"\"password\": \"<PASSWORD>\" }} self.create_article_data = { \"title\": \"Programming Languages\", \"body\": \"There are variety",
"reverse('articles:high_light', kwargs={'slug': slug}), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data[0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_single_comments(self):",
"article_id = response.data['id'] # update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, article_id), self.update_comment,",
"json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_unexisting_comments(self): \"\"\"Test update unexisting comment.\"\"\" token =",
"' 'be greater or equal end_index_position') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test rejects index",
"status.HTTP_200_OK) def test_get_single_comments(self): \"\"\"Test get single comments.\"\"\" token = self.register_user(self.user_two_details) # create an",
"\"\"\"Test update single comment.\"\"\" token = self.register_user(self.user_two_details) # create an article response =",
"start index larger than end index.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response",
"self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_single_comments(self): \"\"\"Test get single comments.\"\"\" token = self.register_user(self.user_two_details) # create",
"format='json') token = register.data[\"token\"] return token def create_article(self, token): \"\"\"Create an article.\"\"\" response",
"to get a token\"\"\" register = self.client.post(self.signup_url, user_details, format='json') token = register.data[\"token\"] return",
"self.signup_url = reverse('authentication:auth-register') self.create_article_url = reverse('articles:articles-listcreate') self.user_two_details = { \"user\": { \"username\": \"andela\",",
"\"Nice Idea\" }} def register_user(self, user_details): \"\"\"Sign up a new user to get",
"self.user_two_details = { \"user\": { \"username\": \"andela\", \"email\": \"<EMAIL>\", \"password\": \"<PASSWORD>\" }} self.create_article_data",
"kwargs={'slug': slug}), self.invalid_index_datatype, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'Start of highlight and end of",
"= self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.invalid_index_datatype, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'],",
"delete single comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response = self.client.post(",
"= self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data[\"error\"], \"The",
"response.data['id'] response = self.client.get( '/api/articles/{}/highlight/{}'.format(slug, article_id), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code,",
"Idea\" }} def register_user(self, user_details): \"\"\"Sign up a new user to get a",
"HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # highlight a text and comment it self.client.post(",
"15 }} self.selection_start_index_larger_than_end_index = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 28,",
"here!!\", \"start_highlight_position\": 28, \"end_highlight_position\": 15 }} self.invalid_index_datatype = { \"comment\": { \"body\": \"Good",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'The start_index_position should not ' 'be greater or equal",
"{ \"comment\": { \"body\": \"Nice Idea\" }} def register_user(self, user_details): \"\"\"Sign up a",
"slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get all comments response = self.client.get( reverse('articles:high_light',",
"HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # update the comment response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug,",
"test_update_single_comments(self): \"\"\"Test update single comment.\"\"\" token = self.register_user(self.user_two_details) # create an article response",
"kwargs={'slug': slug}), self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'The start_index_position should not ' 'be",
"= response.data['slug'] return slug def test_comment_highlighted_text(self): \"\"\"Test comment highlighted text.\"\"\" token = self.register_user(self.user_two_details)",
"from rest_framework.test import APITestCase, APIClient class CommentsTestCase(APITestCase): def setUp(self): self.client = APIClient() self.signup_url",
"comments response = self.client.get( reverse('articles:high_light', kwargs={'slug': slug}), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data[0])",
"status.HTTP_200_OK) def test_update_single_comments(self): \"\"\"Test update single comment.\"\"\" token = self.register_user(self.user_two_details) # create an",
"import APITestCase, APIClient class CommentsTestCase(APITestCase): def setUp(self): self.client = APIClient() self.signup_url = reverse('authentication:auth-register')",
"status.HTTP_200_OK) def test_update_unexisting_comments(self): \"\"\"Test update unexisting comment.\"\"\" token = self.register_user(self.user_two_details) # create an",
"comment article_id = response.data['id'] response = self.client.get( '/api/articles/{}/highlight/{}'.format(slug, article_id), format='json') response_data = json.loads(json.dumps(response.data))",
"reverse('authentication:auth-register') self.create_article_url = reverse('articles:articles-listcreate') self.user_two_details = { \"user\": { \"username\": \"andela\", \"email\": \"<EMAIL>\",",
"test_get_all_comments(self): \"\"\"Test get all comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response",
"self.create_article_data = { \"title\": \"Programming Languages\", \"body\": \"There are variety of programming languagr\",",
"reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') article_id = response.data['id'] # update the",
"all comments response = self.client.get( reverse('articles:high_light', kwargs={'slug': slug}), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text',",
"self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') article_id = response.data['id'] # update",
"{}'.format(token), format='json') article_id = response.data['id'] # update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug,",
"= json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data[0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_single_comments(self): \"\"\"Test get single comments.\"\"\" token",
"status.HTTP_422_UNPROCESSABLE_ENTITY) def test_rejects_missing_required_field(self): \"\"\"Test for missing field.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token)",
"= response.data['id'] # update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, article_id), self.update_comment, HTTP_AUTHORIZATION='token",
"of highlight' ' indices should be both integers') self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY) def test_rejects_missing_required_field(self): \"\"\"Test",
"rejects index data type that are not integers.\"\"\" token = self.register_user(self.user_two_details) slug =",
"status.HTTP_201_CREATED) def test_rejects_start_index_larger_than_end_index(self): \"\"\"Test rejects start index larger than end index.\"\"\" token =",
"response = self.client.get( '/api/articles/{}/highlight/{}'.format(slug, article_id), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK)",
"kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get single comment article_id = response.data['id']",
"{}'.format(token), format='json') self.assertIn('selected_text', response.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_rejects_start_index_larger_than_end_index(self): \"\"\"Test rejects start index larger",
"response.data['id'] response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, article_id), HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'],",
"end_index_position') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_rejects_invalid_types_for_highlight_index(self): \"\"\"Test rejects index data type that are not",
"}} self.selection_start_index_larger_than_end_index = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 28, \"end_highlight_position\":",
"\"There are variety of programming languagr\", \"description\": \"Programming\", \"tagList\": [\"Programming\", \"language\", \"python\"] }",
"than end index.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light',",
"are variety of programming languagr\", \"description\": \"Programming\", \"tagList\": [\"Programming\", \"language\", \"python\"] } self.highlighted_text",
"both integers') self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY) def test_rejects_missing_required_field(self): \"\"\"Test for missing field.\"\"\" token = self.register_user(self.user_two_details)",
"here!!\", \"end_highlight_position\": 15 }} self.update_comment = { \"comment\": { \"body\": \"Nice Idea\" }}",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'], 'The comment does not exist') self.assertEqual(response.status_code,",
"format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # highlight a text and comment it",
"format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # highlight a text and comment on",
"the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, article_id), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data =",
"kwargs={'slug': slug}), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data[0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_single_comments(self): \"\"\"Test",
"def test_update_unexisting_comments(self): \"\"\"Test update unexisting comment.\"\"\" token = self.register_user(self.user_two_details) # create an article",
"def test_comment_highlighted_text(self): \"\"\"Test comment highlighted text.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response",
"text and comment it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token),",
"= response.data['slug'] # highlight a text and comment on it response = self.client.post(",
"{ \"comment\": { \"body\": \"Good work here!!\", \"end_highlight_position\": 15 }} self.update_comment = {",
"self.invalid_index_datatype = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": \"one\", \"end_highlight_position\": 15",
"test_comment_highlighted_text(self): \"\"\"Test comment highlighted text.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response =",
"}} self.update_comment = { \"comment\": { \"body\": \"Nice Idea\" }} def register_user(self, user_details):",
"\"start_highlight_position\": 28, \"end_highlight_position\": 15 }} self.invalid_index_datatype = { \"comment\": { \"body\": \"Good work",
"format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # update the comment response = self.client.put(",
"self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'], 'The comment does not exist')",
"= json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'], 'The comment does not exist') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_get_delete_unexisting_comments(self): \"\"\"Delete",
"work here!!\", \"start_highlight_position\": 2, \"end_highlight_position\": 15 }} self.selection_start_index_larger_than_end_index = { \"comment\": { \"body\":",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') article_id = response.data['id'] # update the comment response = self.client.put(",
"\"\"\"Test update unexisting comment.\"\"\" token = self.register_user(self.user_two_details) # create an article response =",
"\"end_highlight_position\": 15 }} self.selection_start_index_larger_than_end_index = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\":",
"it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') article_id =",
"slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') article_id = response.data['id'] # update the comment response",
"response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.invalid_index_datatype, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'Start of",
"article response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] #",
"slug}), self.selection_start_index_larger_than_end_index, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'The start_index_position should not ' 'be greater",
"= self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.missing_field, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'start_highlight_position is required')",
"token = register.data[\"token\"] return token def create_article(self, token): \"\"\"Create an article.\"\"\" response =",
"format='json') # get all comments response = self.client.get( reverse('articles:high_light', kwargs={'slug': slug}), format='json') response_data",
"format='json') response_data = json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'], 'Comment on highlighted text deleted successfully') self.assertEqual(response.status_code, status.HTTP_200_OK)",
"it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # delete",
"\"language\", \"python\"] } self.highlighted_text = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\":",
"format='json') self.assertEqual(response.data['error'], 'The start_index_position should not ' 'be greater or equal end_index_position') self.assertEqual(response.status_code,",
"\"body\": \"Good work here!!\", \"start_highlight_position\": 28, \"end_highlight_position\": 15 }} self.invalid_index_datatype = { \"comment\":",
"response.data['slug'] return slug def test_comment_highlighted_text(self): \"\"\"Test comment highlighted text.\"\"\" token = self.register_user(self.user_two_details) slug",
"slug def test_comment_highlighted_text(self): \"\"\"Test comment highlighted text.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token)",
"that are not integers.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post(",
"type that are not integers.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response =",
"text.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}),",
"'The comment does not exist') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_get_delete_unexisting_comments(self): \"\"\"Delete unexisting comment\"\"\" token",
"\"Good work here!!\", \"start_highlight_position\": 28, \"end_highlight_position\": 15 }} self.invalid_index_datatype = { \"comment\": {",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'Start of highlight and end of highlight' ' indices",
"self.assertEqual(response.status_code, status.HTTP_200_OK) def test_delete_single_comments(self): \"\"\"Test delete single comments.\"\"\" token = self.register_user(self.user_two_details) # create",
"update the comment response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data",
"json.loads(json.dumps(response.data)) self.assertEqual(response.data['message'], 'Comment on highlighted text deleted successfully') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_single_comments(self): \"\"\"Test",
"json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data[0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_single_comments(self): \"\"\"Test get single comments.\"\"\" token =",
"self.invalid_index_datatype, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'], 'Start of highlight and end of highlight' '",
"json.loads(json.dumps(response.data)) self.assertEqual(response.data['error'], 'The comment does not exist') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_get_delete_unexisting_comments(self): \"\"\"Delete unexisting",
"successfully') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_single_comments(self): \"\"\"Test update single comment.\"\"\" token = self.register_user(self.user_two_details) #",
"}} self.create_article_data = { \"title\": \"Programming Languages\", \"body\": \"There are variety of programming",
"} self.highlighted_text = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 2, \"end_highlight_position\":",
"\"end_highlight_position\": 15 }} self.invalid_index_datatype = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\":",
"\"\"\"Test get all comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response =",
"it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get",
"\"tagList\": [\"Programming\", \"language\", \"python\"] } self.highlighted_text = { \"comment\": { \"body\": \"Good work",
"self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get all comments response",
"comment it self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get all",
"integers') self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY) def test_rejects_missing_required_field(self): \"\"\"Test for missing field.\"\"\" token = self.register_user(self.user_two_details) slug",
"\"\"\"Test delete single comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response =",
"= self.client.post(self.signup_url, user_details, format='json') token = register.data[\"token\"] return token def create_article(self, token): \"\"\"Create",
"start_index_position should not ' 'be greater or equal end_index_position') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_rejects_invalid_types_for_highlight_index(self):",
"15 }} self.missing_field = { \"comment\": { \"body\": \"Good work here!!\", \"end_highlight_position\": 15",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_unexisting_comments(self): \"\"\"Test",
"self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # update the comment",
"self.client.get( '/api/articles/{}/highlight/{}'.format(slug, article_id), format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_delete_single_comments(self):",
"it self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get all comments",
"self.assertIn('selected_text', response_data[0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_single_comments(self): \"\"\"Test get single comments.\"\"\" token = self.register_user(self.user_two_details)",
"get single comment article_id = response.data['id'] response = self.client.get( '/api/articles/{}/highlight/{}'.format(slug, article_id), format='json') response_data",
"HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get single comment article_id = response.data['id'] response = self.client.get(",
"= self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertIn('selected_text',",
"test_get_single_comments(self): \"\"\"Test get single comments.\"\"\" token = self.register_user(self.user_two_details) # create an article response",
"and comment it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json')",
"format='json') response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_unexisting_comments(self): \"\"\"Test update unexisting",
"an article response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug']",
"format='json') # get single comment article_id = response.data['id'] response = self.client.get( '/api/articles/{}/highlight/{}'.format(slug, article_id),",
"slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # delete single comment article_id = response.data['id'] response",
"django.urls import reverse from rest_framework.views import status from rest_framework.test import APITestCase, APIClient class",
"token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.selection_start_index_larger_than_end_index,",
"\"end_highlight_position\": 15 }} self.update_comment = { \"comment\": { \"body\": \"Nice Idea\" }} def",
"= self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.missing_field, HTTP_AUTHORIZATION='token",
"= self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.missing_field, HTTP_AUTHORIZATION='token {}'.format(token), format='json') self.assertEqual(response.data['error'],",
"status.HTTP_400_BAD_REQUEST) def test_get_all_comments(self): \"\"\"Test get all comments.\"\"\" token = self.register_user(self.user_two_details) # create an",
"is required') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_get_all_comments(self): \"\"\"Test get all comments.\"\"\" token = self.register_user(self.user_two_details)",
"self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_unexisting_comments(self): \"\"\"Test update unexisting comment.\"\"\" token = self.register_user(self.user_two_details)",
"{}'.format(token)) slug = response.data['slug'] return slug def test_comment_highlighted_text(self): \"\"\"Test comment highlighted text.\"\"\" token",
"15 }} self.invalid_index_datatype = { \"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": \"one\",",
"not integers.\"\"\" token = self.register_user(self.user_two_details) slug = self.create_article(token) response = self.client.post( reverse('articles:high_light', kwargs={'slug':",
"self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # delete single comment article_id",
"\"comment\": { \"body\": \"Good work here!!\", \"start_highlight_position\": 28, \"end_highlight_position\": 15 }} self.invalid_index_datatype =",
"update the comment response = self.client.put( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token), format='json') response_data",
"response_data = json.loads(json.dumps(response.data)) self.assertIn('selected_text', response_data) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_delete_single_comments(self): \"\"\"Test delete single comments.\"\"\"",
"token def create_article(self, token): \"\"\"Create an article.\"\"\" response = self.client.post( self.create_article_url, self.create_article_data, format='json',",
"= self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # highlight a",
"\"\"\"Test rejects start index larger than end index.\"\"\" token = self.register_user(self.user_two_details) slug =",
"{}'.format(token), format='json') # get single comment article_id = response.data['id'] response = self.client.get( '/api/articles/{}/highlight/{}'.format(slug,",
"self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] return slug def test_comment_highlighted_text(self): \"\"\"Test",
"self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug = response.data['slug'] # highlight a text and comment",
"token): \"\"\"Create an article.\"\"\" response = self.client.post( self.create_article_url, self.create_article_data, format='json', HTTP_AUTHORIZATION='token {}'.format(token)) slug",
"response.data['slug'] # update the comment response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, 2), self.update_comment, HTTP_AUTHORIZATION='token {}'.format(token),",
"return slug def test_comment_highlighted_text(self): \"\"\"Test comment highlighted text.\"\"\" token = self.register_user(self.user_two_details) slug =",
"{ \"title\": \"Programming Languages\", \"body\": \"There are variety of programming languagr\", \"description\": \"Programming\",",
"delete single comment article_id = response.data['id'] response = self.client.delete( '/api/articles/{}/highlight/{}'.format(slug, article_id), HTTP_AUTHORIZATION='token {}'.format(token),",
"\"start_highlight_position\": \"one\", \"end_highlight_position\": 15 }} self.missing_field = { \"comment\": { \"body\": \"Good work",
"json from django.urls import reverse from rest_framework.views import status from rest_framework.test import APITestCase,",
"\"Good work here!!\", \"end_highlight_position\": 15 }} self.update_comment = { \"comment\": { \"body\": \"Nice",
"slug = response.data['slug'] # highlight a text and comment it response = self.client.post(",
"a text and comment it response = self.client.post( reverse('articles:high_light', kwargs={'slug': slug}), self.highlighted_text, HTTP_AUTHORIZATION='token",
"' indices should be both integers') self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY) def test_rejects_missing_required_field(self): \"\"\"Test for missing",
"self.highlighted_text, HTTP_AUTHORIZATION='token {}'.format(token), format='json') # get all comments response = self.client.get( reverse('articles:high_light', kwargs={'slug':",
"= { \"title\": \"Programming Languages\", \"body\": \"There are variety of programming languagr\", \"description\":",
"a token\"\"\" register = self.client.post(self.signup_url, user_details, format='json') token = register.data[\"token\"] return token def",
"register_user(self, user_details): \"\"\"Sign up a new user to get a token\"\"\" register =",
"= response.data['slug'] # highlight a text and comment it response = self.client.post( reverse('articles:high_light',"
] |
[
"'''C -2.94294278 0.39039038 0.00000000 C -1.54778278 0.39039038 0.00000000 C -0.85024478 1.59814138 0.00000000 C",
"C -2.94272378 2.80657238 -0.00167800 C -3.64032478 1.59836638 -0.00068200 H -3.49270178 -0.56192662 0.00045000 H",
"H -0.99827478 -0.56212262 0.00131500 H 0.24943522 1.59822138 0.00063400 H -0.99769878 3.75879338 -0.00125800 H",
"0.24943522 1.59822138 0.00063400 H -0.99769878 3.75879338 -0.00125800 H -3.49284578 3.75885338 -0.00263100 H -4.73992878",
"-0.99769878 3.75879338 -0.00125800 H -3.49284578 3.75885338 -0.00263100 H -4.73992878 1.59854938 -0.00086200 ''' bas",
"1.59854938 -0.00086200 ''' bas = 'def2-svp' mf = guess.mix(xyz, bas, conv='tight') mf2 =",
"H -3.49284578 3.75885338 -0.00263100 H -4.73992878 1.59854938 -0.00086200 ''' bas = 'def2-svp' mf",
"import lib, scf #from pyphf import guess, suscf from automr import autocas, guess",
"suscf from automr import autocas, guess lib.num_threads(8) xyz = '''C -2.94294278 0.39039038 0.00000000",
"from automr import autocas, guess lib.num_threads(8) xyz = '''C -2.94294278 0.39039038 0.00000000 C",
"guess, suscf from automr import autocas, guess lib.num_threads(8) xyz = '''C -2.94294278 0.39039038",
"-1.54778278 0.39039038 0.00000000 C -0.85024478 1.59814138 0.00000000 C -1.54789878 2.80665038 -0.00119900 C -2.94272378",
"C -0.85024478 1.59814138 0.00000000 C -1.54789878 2.80665038 -0.00119900 C -2.94272378 2.80657238 -0.00167800 C",
"H -4.73992878 1.59854938 -0.00086200 ''' bas = 'def2-svp' mf = guess.mix(xyz, bas, conv='tight')",
"2.80665038 -0.00119900 C -2.94272378 2.80657238 -0.00167800 C -3.64032478 1.59836638 -0.00068200 H -3.49270178 -0.56192662",
"0.00131500 H 0.24943522 1.59822138 0.00063400 H -0.99769878 3.75879338 -0.00125800 H -3.49284578 3.75885338 -0.00263100",
"pyphf import guess, suscf from automr import autocas, guess lib.num_threads(8) xyz = '''C",
"-2.94294278 0.39039038 0.00000000 C -1.54778278 0.39039038 0.00000000 C -0.85024478 1.59814138 0.00000000 C -1.54789878",
"0.00000000 C -0.85024478 1.59814138 0.00000000 C -1.54789878 2.80665038 -0.00119900 C -2.94272378 2.80657238 -0.00167800",
"1.59822138 0.00063400 H -0.99769878 3.75879338 -0.00125800 H -3.49284578 3.75885338 -0.00263100 H -4.73992878 1.59854938",
"0.39039038 0.00000000 C -1.54778278 0.39039038 0.00000000 C -0.85024478 1.59814138 0.00000000 C -1.54789878 2.80665038",
"scf #from pyphf import guess, suscf from automr import autocas, guess lib.num_threads(8) xyz",
"autocas, guess lib.num_threads(8) xyz = '''C -2.94294278 0.39039038 0.00000000 C -1.54778278 0.39039038 0.00000000",
"lib.num_threads(8) xyz = '''C -2.94294278 0.39039038 0.00000000 C -1.54778278 0.39039038 0.00000000 C -0.85024478",
"0.00000000 C -1.54778278 0.39039038 0.00000000 C -0.85024478 1.59814138 0.00000000 C -1.54789878 2.80665038 -0.00119900",
"automr import autocas, guess lib.num_threads(8) xyz = '''C -2.94294278 0.39039038 0.00000000 C -1.54778278",
"C -1.54789878 2.80665038 -0.00119900 C -2.94272378 2.80657238 -0.00167800 C -3.64032478 1.59836638 -0.00068200 H",
"0.00000000 C -1.54789878 2.80665038 -0.00119900 C -2.94272378 2.80657238 -0.00167800 C -3.64032478 1.59836638 -0.00068200",
"-0.00068200 H -3.49270178 -0.56192662 0.00045000 H -0.99827478 -0.56212262 0.00131500 H 0.24943522 1.59822138 0.00063400",
"import guess, suscf from automr import autocas, guess lib.num_threads(8) xyz = '''C -2.94294278",
"-0.85024478 1.59814138 0.00000000 C -1.54789878 2.80665038 -0.00119900 C -2.94272378 2.80657238 -0.00167800 C -3.64032478",
"0.00063400 H -0.99769878 3.75879338 -0.00125800 H -3.49284578 3.75885338 -0.00263100 H -4.73992878 1.59854938 -0.00086200",
"-0.00263100 H -4.73992878 1.59854938 -0.00086200 ''' bas = 'def2-svp' mf = guess.mix(xyz, bas,",
"pyscf import lib, scf #from pyphf import guess, suscf from automr import autocas,",
"from pyscf import lib, scf #from pyphf import guess, suscf from automr import",
"C -1.54778278 0.39039038 0.00000000 C -0.85024478 1.59814138 0.00000000 C -1.54789878 2.80665038 -0.00119900 C",
"-0.56212262 0.00131500 H 0.24943522 1.59822138 0.00063400 H -0.99769878 3.75879338 -0.00125800 H -3.49284578 3.75885338",
"H 0.24943522 1.59822138 0.00063400 H -0.99769878 3.75879338 -0.00125800 H -3.49284578 3.75885338 -0.00263100 H",
"-0.00086200 ''' bas = 'def2-svp' mf = guess.mix(xyz, bas, conv='tight') mf2 = autocas.cas(mf)",
"-0.99827478 -0.56212262 0.00131500 H 0.24943522 1.59822138 0.00063400 H -0.99769878 3.75879338 -0.00125800 H -3.49284578",
"0.39039038 0.00000000 C -0.85024478 1.59814138 0.00000000 C -1.54789878 2.80665038 -0.00119900 C -2.94272378 2.80657238",
"H -3.49270178 -0.56192662 0.00045000 H -0.99827478 -0.56212262 0.00131500 H 0.24943522 1.59822138 0.00063400 H",
"lib, scf #from pyphf import guess, suscf from automr import autocas, guess lib.num_threads(8)",
"-3.64032478 1.59836638 -0.00068200 H -3.49270178 -0.56192662 0.00045000 H -0.99827478 -0.56212262 0.00131500 H 0.24943522",
"2.80657238 -0.00167800 C -3.64032478 1.59836638 -0.00068200 H -3.49270178 -0.56192662 0.00045000 H -0.99827478 -0.56212262",
"-3.49270178 -0.56192662 0.00045000 H -0.99827478 -0.56212262 0.00131500 H 0.24943522 1.59822138 0.00063400 H -0.99769878",
"-1.54789878 2.80665038 -0.00119900 C -2.94272378 2.80657238 -0.00167800 C -3.64032478 1.59836638 -0.00068200 H -3.49270178",
"-0.00125800 H -3.49284578 3.75885338 -0.00263100 H -4.73992878 1.59854938 -0.00086200 ''' bas = 'def2-svp'",
"xyz = '''C -2.94294278 0.39039038 0.00000000 C -1.54778278 0.39039038 0.00000000 C -0.85024478 1.59814138",
"-3.49284578 3.75885338 -0.00263100 H -4.73992878 1.59854938 -0.00086200 ''' bas = 'def2-svp' mf =",
"H -0.99769878 3.75879338 -0.00125800 H -3.49284578 3.75885338 -0.00263100 H -4.73992878 1.59854938 -0.00086200 '''",
"3.75885338 -0.00263100 H -4.73992878 1.59854938 -0.00086200 ''' bas = 'def2-svp' mf = guess.mix(xyz,",
"guess lib.num_threads(8) xyz = '''C -2.94294278 0.39039038 0.00000000 C -1.54778278 0.39039038 0.00000000 C",
"-4.73992878 1.59854938 -0.00086200 ''' bas = 'def2-svp' mf = guess.mix(xyz, bas, conv='tight') mf2",
"1.59814138 0.00000000 C -1.54789878 2.80665038 -0.00119900 C -2.94272378 2.80657238 -0.00167800 C -3.64032478 1.59836638",
"0.00045000 H -0.99827478 -0.56212262 0.00131500 H 0.24943522 1.59822138 0.00063400 H -0.99769878 3.75879338 -0.00125800",
"-0.00119900 C -2.94272378 2.80657238 -0.00167800 C -3.64032478 1.59836638 -0.00068200 H -3.49270178 -0.56192662 0.00045000",
"import autocas, guess lib.num_threads(8) xyz = '''C -2.94294278 0.39039038 0.00000000 C -1.54778278 0.39039038",
"1.59836638 -0.00068200 H -3.49270178 -0.56192662 0.00045000 H -0.99827478 -0.56212262 0.00131500 H 0.24943522 1.59822138",
"-0.56192662 0.00045000 H -0.99827478 -0.56212262 0.00131500 H 0.24943522 1.59822138 0.00063400 H -0.99769878 3.75879338",
"#from pyphf import guess, suscf from automr import autocas, guess lib.num_threads(8) xyz =",
"C -3.64032478 1.59836638 -0.00068200 H -3.49270178 -0.56192662 0.00045000 H -0.99827478 -0.56212262 0.00131500 H",
"-2.94272378 2.80657238 -0.00167800 C -3.64032478 1.59836638 -0.00068200 H -3.49270178 -0.56192662 0.00045000 H -0.99827478",
"= '''C -2.94294278 0.39039038 0.00000000 C -1.54778278 0.39039038 0.00000000 C -0.85024478 1.59814138 0.00000000",
"-0.00167800 C -3.64032478 1.59836638 -0.00068200 H -3.49270178 -0.56192662 0.00045000 H -0.99827478 -0.56212262 0.00131500",
"3.75879338 -0.00125800 H -3.49284578 3.75885338 -0.00263100 H -4.73992878 1.59854938 -0.00086200 ''' bas ="
] |
[
"'0007_add_support_oracle'), ] operations = [ migrations.AlterField( model_name='request', name='prof_file', field=models.FileField(default='', max_length=300, null=True, storage=silk.storage.ProfilerResultStorage(), upload_to=b''),",
"__future__ import unicode_literals from django.db import migrations, models import silk.storage class Migration(migrations.Migration): dependencies",
"models import silk.storage class Migration(migrations.Migration): dependencies = [ ('silk', '0007_add_support_oracle'), ] operations =",
"utf-8 -*- # Generated by Django 1.9.7 on 2018-01-11 09:51 from __future__ import",
"unicode_literals from django.db import migrations, models import silk.storage class Migration(migrations.Migration): dependencies = [",
"by Django 1.9.7 on 2018-01-11 09:51 from __future__ import unicode_literals from django.db import",
"import unicode_literals from django.db import migrations, models import silk.storage class Migration(migrations.Migration): dependencies =",
"on 2018-01-11 09:51 from __future__ import unicode_literals from django.db import migrations, models import",
"from django.db import migrations, models import silk.storage class Migration(migrations.Migration): dependencies = [ ('silk',",
"Generated by Django 1.9.7 on 2018-01-11 09:51 from __future__ import unicode_literals from django.db",
"('silk', '0007_add_support_oracle'), ] operations = [ migrations.AlterField( model_name='request', name='prof_file', field=models.FileField(default='', max_length=300, null=True, storage=silk.storage.ProfilerResultStorage(),",
"2018-01-11 09:51 from __future__ import unicode_literals from django.db import migrations, models import silk.storage",
"-*- # Generated by Django 1.9.7 on 2018-01-11 09:51 from __future__ import unicode_literals",
"import migrations, models import silk.storage class Migration(migrations.Migration): dependencies = [ ('silk', '0007_add_support_oracle'), ]",
"Migration(migrations.Migration): dependencies = [ ('silk', '0007_add_support_oracle'), ] operations = [ migrations.AlterField( model_name='request', name='prof_file',",
"coding: utf-8 -*- # Generated by Django 1.9.7 on 2018-01-11 09:51 from __future__",
"import silk.storage class Migration(migrations.Migration): dependencies = [ ('silk', '0007_add_support_oracle'), ] operations = [",
"class Migration(migrations.Migration): dependencies = [ ('silk', '0007_add_support_oracle'), ] operations = [ migrations.AlterField( model_name='request',",
"= [ ('silk', '0007_add_support_oracle'), ] operations = [ migrations.AlterField( model_name='request', name='prof_file', field=models.FileField(default='', max_length=300,",
"# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2018-01-11 09:51",
"operations = [ migrations.AlterField( model_name='request', name='prof_file', field=models.FileField(default='', max_length=300, null=True, storage=silk.storage.ProfilerResultStorage(), upload_to=b''), ), ]",
"] operations = [ migrations.AlterField( model_name='request', name='prof_file', field=models.FileField(default='', max_length=300, null=True, storage=silk.storage.ProfilerResultStorage(), upload_to=b''), ),",
"09:51 from __future__ import unicode_literals from django.db import migrations, models import silk.storage class",
"# Generated by Django 1.9.7 on 2018-01-11 09:51 from __future__ import unicode_literals from",
"[ ('silk', '0007_add_support_oracle'), ] operations = [ migrations.AlterField( model_name='request', name='prof_file', field=models.FileField(default='', max_length=300, null=True,",
"1.9.7 on 2018-01-11 09:51 from __future__ import unicode_literals from django.db import migrations, models",
"silk.storage class Migration(migrations.Migration): dependencies = [ ('silk', '0007_add_support_oracle'), ] operations = [ migrations.AlterField(",
"-*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2018-01-11 09:51 from",
"from __future__ import unicode_literals from django.db import migrations, models import silk.storage class Migration(migrations.Migration):",
"Django 1.9.7 on 2018-01-11 09:51 from __future__ import unicode_literals from django.db import migrations,",
"django.db import migrations, models import silk.storage class Migration(migrations.Migration): dependencies = [ ('silk', '0007_add_support_oracle'),",
"dependencies = [ ('silk', '0007_add_support_oracle'), ] operations = [ migrations.AlterField( model_name='request', name='prof_file', field=models.FileField(default='',",
"migrations, models import silk.storage class Migration(migrations.Migration): dependencies = [ ('silk', '0007_add_support_oracle'), ] operations"
] |
[
"set the corresponding environment variables. \"\"\" world_path=None world_pkg=None world_filename=None \"\"\" If spawning the",
"needed. # def _send_action(self, action): \"\"\" Function to send an action to the",
"the corresponding environment variables. \"\"\" world_path=None world_pkg=None world_filename=None \"\"\" If spawning the robot",
"\"\"\" Function to send an action to the robot \"\"\" raise NotImplementedError() def",
"is False. \"\"\" reset_controllers=False \"\"\" Set the reset mode of gazebo at the",
"rospy #- Uncomment the library modules as neeeed # from frobs_rl.common import ros_gazebo",
"world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name, urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list, urdf_xacro_args=urdf_xacro_args,",
"episode has a success condition then set done as: self.info['is_success'] = 1.0 \"\"\"",
"gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name, urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list, urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq, model_pos_x=model_pos_x,",
"the position of the robot, at the begining of each episode. \"\"\" raise",
"# from frobs_rl.common import ros_params # from frobs_rl.common import ros_urdf # from frobs_rl.common",
"variables. \"\"\" world_path=None world_pkg=None world_filename=None \"\"\" If spawning the robot using the given",
"step mode of Gazebo. 1 is \"using ROS services\", 2 is \"using step",
"used in the env. \"\"\" rospy.loginfo(\"Starting Custom Robot Env\") \"\"\" If launching Gazebo",
"set the corresponding environment variables. \"\"\" launch_gazebo=False gazebo_init_paused=True gazebo_use_gui=True gazebo_recording=False gazebo_freq=100 gazebo_max_freq=None gazebo_timestep=None",
"publisher or subscribers as needed. \"\"\" # self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1) #",
"them here if needed. # def _send_action(self, action): \"\"\" Function to send an",
"task envs using the custom robot. \"\"\" def __init__(self): \"\"\" Describe the robot",
"# def _check_subs_and_pubs_connection(self): \"\"\" Function to check if the Gazebo and ROS connections",
"\"controller_list\" will be reset at the beginning of each episode, default is False.",
"# from frobs_rl.common import ros_controllers # from frobs_rl.common import ros_node # from frobs_rl.common",
"environment variables. \"\"\" spawn_robot=False model_name_in_gazebo=\"robot\" namespace=\"/robot\" pkg_name=None urdf_file=None urdf_folder=\"/urdf\" controller_file=None controller_list=None urdf_xacro_args=None rob_state_publisher_max_freq=",
"import ros_spawn \"\"\" Although it is best to register only the task environment,",
"\"\"\" return True #-------------------------------------------------------# # Custom available methods for the CustomRobotEnv # #",
"If spawning the robot using the given spawner then set the corresponding environment",
"with the env then set the corresponding environment variables. \"\"\" launch_gazebo=False gazebo_init_paused=True gazebo_use_gui=True",
"# from frobs_rl.common import ros_urdf # from frobs_rl.common import ros_spawn \"\"\" Although it",
"\"\"\" Function to check if the episode is done. If the episode has",
"gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path, world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name, urdf_file=urdf_file,",
"Function to send an action to the robot \"\"\" raise NotImplementedError() def _get_observation(self):",
"raise NotImplementedError() def _check_if_done(self): \"\"\" Function to check if the episode is done.",
"rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1) # self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1) \"\"\" If using the",
"urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list, urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq, model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z, model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z,",
"gazebo_max_freq=None gazebo_timestep=None \"\"\" If launching Gazebo with a custom world then set the",
"modules as neeeed # from frobs_rl.common import ros_gazebo # from frobs_rl.common import ros_controllers",
"set the corresponding environment variables. \"\"\" spawn_robot=False model_name_in_gazebo=\"robot\" namespace=\"/robot\" pkg_name=None urdf_file=None urdf_folder=\"/urdf\" controller_file=None",
"urdf_file=None urdf_folder=\"/urdf\" controller_file=None controller_list=None urdf_xacro_args=None rob_state_publisher_max_freq= None model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0 model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0;",
"Robot Env, use this for all task envs using the custom robot. \"\"\"",
"task environment, one can also register the robot environment. \"\"\" # register( #",
") class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\" Custom Robot Env, use this for all task envs",
"_get_reward(self): \"\"\" Function to get the reward from the enviroment. \"\"\" raise NotImplementedError()",
"of each episode: 1 is \"reset_world\", 2 is \"reset_simulation\". Default is 1. \"\"\"",
"spaces from gym.envs.registration import register from frobs_rl.envs import robot_BasicEnv import rospy #- Uncomment",
"an action to the robot \"\"\" raise NotImplementedError() def _get_observation(self): \"\"\" Function to",
"JointState, queue_size=1) # self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1) \"\"\" If using the __check_subs_and_pubs_connection",
"reset mode of gazebo at the beginning of each episode: 1 is \"reset_world\",",
"Default is 1. If using the step mode 2 then set the number",
"a custom world then set the corresponding environment variables. \"\"\" world_path=None world_pkg=None world_filename=None",
"\"\"\" spawn_robot=False model_name_in_gazebo=\"robot\" namespace=\"/robot\" pkg_name=None urdf_file=None urdf_folder=\"/urdf\" controller_file=None controller_list=None urdf_xacro_args=None rob_state_publisher_max_freq= None model_pos_x=0.0;",
"available methods for the CustomRobotEnv # # Although it is best to implement",
"this for all task envs using the custom robot. \"\"\" def __init__(self): \"\"\"",
"methods in # # the Task Env, one can use them here if",
"from frobs_rl.common import ros_params # from frobs_rl.common import ros_urdf # from frobs_rl.common import",
"_set_episode_init_params(self): \"\"\" Function to set some parameters, like the position of the robot,",
"_check_subs_and_pubs_connection(self): \"\"\" Function to check if the Gazebo and ROS connections are ready",
"\"\"\" Although it is best to register only the task environment, one can",
"class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\" Custom Robot Env, use this for all task envs using",
"import ros_launch # from frobs_rl.common import ros_params # from frobs_rl.common import ros_urdf #",
"the robot environment. \"\"\" # register( # id='CustomRobotEnv-v0', # entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', # max_episode_steps=10000, #",
"Env\") \"\"\" If launching Gazebo with the env then set the corresponding environment",
"# id='CustomRobotEnv-v0', # entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', # max_episode_steps=10000, # ) class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\" Custom Robot",
"then un-comment the lines below. \"\"\" # ros_gazebo.gazebo_unpause_physics() # self._check_subs_and_pubs_connection() # ros_gazebo.gazebo_pause_physics() \"\"\"",
"frobs_rl.common import ros_urdf # from frobs_rl.common import ros_spawn \"\"\" Although it is best",
"check if the Gazebo and ROS connections are ready \"\"\" return True #-------------------------------------------------------#",
"the corresponding environment variables. \"\"\" spawn_robot=False model_name_in_gazebo=\"robot\" namespace=\"/robot\" pkg_name=None urdf_file=None urdf_folder=\"/urdf\" controller_file=None controller_list=None",
"number of steps of Gazebo to take in each episode. Default is 1.",
"gazebo_freq=100 gazebo_max_freq=None gazebo_timestep=None \"\"\" If launching Gazebo with a custom world then set",
"ros_gazebo # from frobs_rl.common import ros_controllers # from frobs_rl.common import ros_node # from",
"the lines below. \"\"\" # ros_gazebo.gazebo_unpause_physics() # self._check_subs_and_pubs_connection() # ros_gazebo.gazebo_pause_physics() \"\"\" Finished __init__",
"parent class with the corresponding variables. \"\"\" super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording,",
"method, then un-comment the lines below. \"\"\" # ros_gazebo.gazebo_unpause_physics() # self._check_subs_and_pubs_connection() # ros_gazebo.gazebo_pause_physics()",
"controller_list=controller_list, urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq, model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z, model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w, reset_controllers=reset_controllers, reset_mode=reset_mode,",
"controllers in \"controller_list\" will be reset at the beginning of each episode, default",
"\"\"\" step_mode=1 num_gazebo_steps=1 \"\"\" Init the parent class with the corresponding variables. \"\"\"",
"import register from frobs_rl.envs import robot_BasicEnv import rospy #- Uncomment the library modules",
"in each episode. Default is 1. \"\"\" step_mode=1 num_gazebo_steps=1 \"\"\" Init the parent",
"from the enviroment. \"\"\" raise NotImplementedError() def _get_reward(self): \"\"\" Function to get the",
"each episode: 1 is \"reset_world\", 2 is \"reset_simulation\". Default is 1. \"\"\" reset_mode=1",
"from frobs_rl.common import ros_urdf # from frobs_rl.common import ros_spawn \"\"\" Although it is",
"\"\"\" rospy.loginfo(\"Starting Custom Robot Env\") \"\"\" If launching Gazebo with the env then",
"of steps of Gazebo to take in each episode. Default is 1. \"\"\"",
"model_pos_z=0.0 model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0 \"\"\" Set if the controllers in \"controller_list\" will",
"library modules as neeeed # from frobs_rl.common import ros_gazebo # from frobs_rl.common import",
"gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path, world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name, urdf_file=urdf_file, urdf_folder=urdf_folder,",
"the reset mode of gazebo at the beginning of each episode: 1 is",
"Robot Env\") \"\"\" If launching Gazebo with the env then set the corresponding",
"ros_controllers # from frobs_rl.common import ros_node # from frobs_rl.common import ros_launch # from",
"is 1. \"\"\" step_mode=1 num_gazebo_steps=1 \"\"\" Init the parent class with the corresponding",
"# Custom available methods for the CustomRobotEnv # # Although it is best",
"custom world then set the corresponding environment variables. \"\"\" world_path=None world_pkg=None world_filename=None \"\"\"",
"raise NotImplementedError() def _get_observation(self): \"\"\" Function to get the observation from the enviroment.",
"__init__(self): \"\"\" Describe the robot used in the env. \"\"\" rospy.loginfo(\"Starting Custom Robot",
"Env, use this for all task envs using the custom robot. \"\"\" def",
"environment. \"\"\" # register( # id='CustomRobotEnv-v0', # entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', # max_episode_steps=10000, # ) class",
"the env then set the corresponding environment variables. \"\"\" launch_gazebo=False gazebo_init_paused=True gazebo_use_gui=True gazebo_recording=False",
"# # Although it is best to implement these methods in # #",
"num_gazebo_steps=num_gazebo_steps) \"\"\" Define publisher or subscribers as needed. \"\"\" # self.pub1 = rospy.Publisher('/robot/controller_manager/command',",
"2 then set the number of steps of Gazebo to take in each",
"import ros_params # from frobs_rl.common import ros_urdf # from frobs_rl.common import ros_spawn \"\"\"",
"raise NotImplementedError() def _get_reward(self): \"\"\" Function to get the reward from the enviroment.",
"frobs_rl.common import ros_node # from frobs_rl.common import ros_launch # from frobs_rl.common import ros_params",
"subscribers as needed. \"\"\" # self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1) # self.sub1 =",
"environment variables. \"\"\" world_path=None world_pkg=None world_filename=None \"\"\" If spawning the robot using the",
"the observation from the enviroment. \"\"\" raise NotImplementedError() def _get_reward(self): \"\"\" Function to",
"done as: self.info['is_success'] = 1.0 \"\"\" raise NotImplementedError() def _set_episode_init_params(self): \"\"\" Function to",
"is best to register only the task environment, one can also register the",
"NotImplementedError() def _get_reward(self): \"\"\" Function to get the reward from the enviroment. \"\"\"",
"check if the episode is done. If the episode has a success condition",
"\"\"\" world_path=None world_pkg=None world_filename=None \"\"\" If spawning the robot using the given spawner",
"Gazebo\". Default is 1. If using the step mode 2 then set the",
"get the observation from the enviroment. \"\"\" raise NotImplementedError() def _get_reward(self): \"\"\" Function",
"model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0 model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0 \"\"\" Set if the controllers in",
"reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps) \"\"\" Define publisher or subscribers as needed. \"\"\" # self.pub1",
"Function to get the observation from the enviroment. \"\"\" raise NotImplementedError() def _get_reward(self):",
"corresponding variables. \"\"\" super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path, world_pkg=world_pkg, world_filename=world_filename,",
"or subscribers as needed. \"\"\" # self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1) # self.sub1",
"Finished __init__ method \"\"\" rospy.loginfo(\"Finished Init of Custom Robot env\") #------------------------------------------# # Custom",
"JointState, self.callback1) \"\"\" If using the __check_subs_and_pubs_connection method, then un-comment the lines below.",
"# ros_gazebo.gazebo_unpause_physics() # self._check_subs_and_pubs_connection() # ros_gazebo.gazebo_pause_physics() \"\"\" Finished __init__ method \"\"\" rospy.loginfo(\"Finished Init",
"using the given spawner then set the corresponding environment variables. \"\"\" spawn_robot=False model_name_in_gazebo=\"robot\"",
"import spaces from gym.envs.registration import register from frobs_rl.envs import robot_BasicEnv import rospy #-",
"Env, one can use them here if needed. # def _send_action(self, action): \"\"\"",
"\"\"\" Finished __init__ method \"\"\" rospy.loginfo(\"Finished Init of Custom Robot env\") #------------------------------------------# #",
"services\", 2 is \"using step function of Gazebo\". Default is 1. If using",
"world then set the corresponding environment variables. \"\"\" world_path=None world_pkg=None world_filename=None \"\"\" If",
"raise NotImplementedError() def _set_episode_init_params(self): \"\"\" Function to set some parameters, like the position",
"def _set_episode_init_params(self): \"\"\" Function to set some parameters, like the position of the",
"observation from the enviroment. \"\"\" raise NotImplementedError() def _get_reward(self): \"\"\" Function to get",
"Custom Robot Env\") \"\"\" If launching Gazebo with the env then set the",
"def __init__(self): \"\"\" Describe the robot used in the env. \"\"\" rospy.loginfo(\"Starting Custom",
"\"\"\" # register( # id='CustomRobotEnv-v0', # entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', # max_episode_steps=10000, # ) class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv):",
"variables. \"\"\" super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path, world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq,",
"# Although it is best to implement these methods in # # the",
"import robot_BasicEnv import rospy #- Uncomment the library modules as neeeed # from",
"\"\"\" If spawning the robot using the given spawner then set the corresponding",
"\"\"\" raise NotImplementedError() def _set_episode_init_params(self): \"\"\" Function to set some parameters, like the",
"take in each episode. Default is 1. \"\"\" step_mode=1 num_gazebo_steps=1 \"\"\" Init the",
"to register only the task environment, one can also register the robot environment.",
"corresponding environment variables. \"\"\" spawn_robot=False model_name_in_gazebo=\"robot\" namespace=\"/robot\" pkg_name=None urdf_file=None urdf_folder=\"/urdf\" controller_file=None controller_list=None urdf_xacro_args=None",
"of Gazebo to take in each episode. Default is 1. \"\"\" step_mode=1 num_gazebo_steps=1",
"robot \"\"\" raise NotImplementedError() def _get_observation(self): \"\"\" Function to get the observation from",
"= 1.0 \"\"\" raise NotImplementedError() def _set_episode_init_params(self): \"\"\" Function to set some parameters,",
"will be reset at the beginning of each episode, default is False. \"\"\"",
"the episode has a success condition then set done as: self.info['is_success'] = 1.0",
"these methods in # # the Task Env, one can use them here",
"then set the corresponding environment variables. \"\"\" spawn_robot=False model_name_in_gazebo=\"robot\" namespace=\"/robot\" pkg_name=None urdf_file=None urdf_folder=\"/urdf\"",
"episode is done. If the episode has a success condition then set done",
"ros_urdf # from frobs_rl.common import ros_spawn \"\"\" Although it is best to register",
"# self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1) # self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1) \"\"\"",
"to check if the Gazebo and ROS connections are ready \"\"\" return True",
"import ros_urdf # from frobs_rl.common import ros_spawn \"\"\" Although it is best to",
"with the corresponding variables. \"\"\" super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path,",
"from frobs_rl.common import ros_node # from frobs_rl.common import ros_launch # from frobs_rl.common import",
"then set done as: self.info['is_success'] = 1.0 \"\"\" raise NotImplementedError() def _set_episode_init_params(self): \"\"\"",
"env then set the corresponding environment variables. \"\"\" launch_gazebo=False gazebo_init_paused=True gazebo_use_gui=True gazebo_recording=False gazebo_freq=100",
"= rospy.Subscriber('/robot/joint_states', JointState, self.callback1) \"\"\" If using the __check_subs_and_pubs_connection method, then un-comment the",
"then set the corresponding environment variables. \"\"\" world_path=None world_pkg=None world_filename=None \"\"\" If spawning",
"model_pos_y=model_pos_y, model_pos_z=model_pos_z, model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w, reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps) \"\"\" Define publisher",
"then set the corresponding environment variables. \"\"\" launch_gazebo=False gazebo_init_paused=True gazebo_use_gui=True gazebo_recording=False gazebo_freq=100 gazebo_max_freq=None",
"1 is \"using ROS services\", 2 is \"using step function of Gazebo\". Default",
"\"\"\" def __init__(self): \"\"\" Describe the robot used in the env. \"\"\" rospy.loginfo(\"Starting",
"model_ori_w=model_ori_w, reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps) \"\"\" Define publisher or subscribers as needed. \"\"\"",
"use them here if needed. # def _send_action(self, action): \"\"\" Function to send",
"as neeeed # from frobs_rl.common import ros_gazebo # from frobs_rl.common import ros_controllers #",
"\"\"\" reset_controllers=False \"\"\" Set the reset mode of gazebo at the beginning of",
"\"reset_simulation\". Default is 1. \"\"\" reset_mode=1 \"\"\" Set the step mode of Gazebo.",
"Set if the controllers in \"controller_list\" will be reset at the beginning of",
"frobs_rl.envs import robot_BasicEnv import rospy #- Uncomment the library modules as neeeed #",
"spawn_robot=False model_name_in_gazebo=\"robot\" namespace=\"/robot\" pkg_name=None urdf_file=None urdf_folder=\"/urdf\" controller_file=None controller_list=None urdf_xacro_args=None rob_state_publisher_max_freq= None model_pos_x=0.0; model_pos_y=0.0;",
"None model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0 model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0 \"\"\" Set if the controllers",
"# self._check_subs_and_pubs_connection() # ros_gazebo.gazebo_pause_physics() \"\"\" Finished __init__ method \"\"\" rospy.loginfo(\"Finished Init of Custom",
"# def _send_action(self, action): \"\"\" Function to send an action to the robot",
"model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z, model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w, reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps) \"\"\" Define",
"Robot env\") #------------------------------------------# # Custom methods for the CustomRobotEnv # def _check_subs_and_pubs_connection(self): \"\"\"",
"register the robot environment. \"\"\" # register( # id='CustomRobotEnv-v0', # entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', # max_episode_steps=10000,",
"1. \"\"\" step_mode=1 num_gazebo_steps=1 \"\"\" Init the parent class with the corresponding variables.",
"the given spawner then set the corresponding environment variables. \"\"\" spawn_robot=False model_name_in_gazebo=\"robot\" namespace=\"/robot\"",
"False. \"\"\" reset_controllers=False \"\"\" Set the reset mode of gazebo at the beginning",
"#-------------------------------------------------------# # Custom available methods for the CustomRobotEnv # # Although it is",
"using the custom robot. \"\"\" def __init__(self): \"\"\" Describe the robot used in",
"self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path, world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo,",
"is done. If the episode has a success condition then set done as:",
"is best to implement these methods in # # the Task Env, one",
"namespace=namespace, pkg_name=pkg_name, urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list, urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq, model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z, model_ori_x=model_ori_x,",
"default is False. \"\"\" reset_controllers=False \"\"\" Set the reset mode of gazebo at",
"_check_if_done(self): \"\"\" Function to check if the episode is done. If the episode",
"max_episode_steps=10000, # ) class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\" Custom Robot Env, use this for all",
"the reward from the enviroment. \"\"\" raise NotImplementedError() def _check_if_done(self): \"\"\" Function to",
"if the episode is done. If the episode has a success condition then",
"below. \"\"\" # ros_gazebo.gazebo_unpause_physics() # self._check_subs_and_pubs_connection() # ros_gazebo.gazebo_pause_physics() \"\"\" Finished __init__ method \"\"\"",
"connections are ready \"\"\" return True #-------------------------------------------------------# # Custom available methods for the",
"#- Uncomment the library modules as neeeed # from frobs_rl.common import ros_gazebo #",
"\"\"\" Describe the robot used in the env. \"\"\" rospy.loginfo(\"Starting Custom Robot Env\")",
"success condition then set done as: self.info['is_success'] = 1.0 \"\"\" raise NotImplementedError() def",
"def _check_subs_and_pubs_connection(self): \"\"\" Function to check if the Gazebo and ROS connections are",
"urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq, model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z, model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w, reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode,",
"to get the reward from the enviroment. \"\"\" raise NotImplementedError() def _check_if_done(self): \"\"\"",
"at the beginning of each episode: 1 is \"reset_world\", 2 is \"reset_simulation\". Default",
"a success condition then set done as: self.info['is_success'] = 1.0 \"\"\" raise NotImplementedError()",
"the task environment, one can also register the robot environment. \"\"\" # register(",
"methods for the CustomRobotEnv # def _check_subs_and_pubs_connection(self): \"\"\" Function to check if the",
"beginning of each episode, default is False. \"\"\" reset_controllers=False \"\"\" Set the reset",
"launching Gazebo with the env then set the corresponding environment variables. \"\"\" launch_gazebo=False",
"the episode is done. If the episode has a success condition then set",
"the __check_subs_and_pubs_connection method, then un-comment the lines below. \"\"\" # ros_gazebo.gazebo_unpause_physics() # self._check_subs_and_pubs_connection()",
"Function to check if the Gazebo and ROS connections are ready \"\"\" return",
"rob_state_publisher_max_freq= None model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0 model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0 \"\"\" Set if the",
"to implement these methods in # # the Task Env, one can use",
"\"\"\" raise NotImplementedError() def _check_if_done(self): \"\"\" Function to check if the episode is",
"self._check_subs_and_pubs_connection() # ros_gazebo.gazebo_pause_physics() \"\"\" Finished __init__ method \"\"\" rospy.loginfo(\"Finished Init of Custom Robot",
"ROS connections are ready \"\"\" return True #-------------------------------------------------------# # Custom available methods for",
"in the env. \"\"\" rospy.loginfo(\"Starting Custom Robot Env\") \"\"\" If launching Gazebo with",
"the Task Env, one can use them here if needed. # def _send_action(self,",
"frobs_rl.common import ros_launch # from frobs_rl.common import ros_params # from frobs_rl.common import ros_urdf",
"using the step mode 2 then set the number of steps of Gazebo",
"if the Gazebo and ROS connections are ready \"\"\" return True #-------------------------------------------------------# #",
"are ready \"\"\" return True #-------------------------------------------------------# # Custom available methods for the CustomRobotEnv",
"\"\"\" Define publisher or subscribers as needed. \"\"\" # self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState,",
"the CustomRobotEnv # # Although it is best to implement these methods in",
"spawning the robot using the given spawner then set the corresponding environment variables.",
"the CustomRobotEnv # def _check_subs_and_pubs_connection(self): \"\"\" Function to check if the Gazebo and",
"def _send_action(self, action): \"\"\" Function to send an action to the robot \"\"\"",
"super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path, world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot,",
"from frobs_rl.common import ros_controllers # from frobs_rl.common import ros_node # from frobs_rl.common import",
"model_ori_z=0.0; model_ori_w=0.0 \"\"\" Set if the controllers in \"controller_list\" will be reset at",
"environment variables. \"\"\" launch_gazebo=False gazebo_init_paused=True gazebo_use_gui=True gazebo_recording=False gazebo_freq=100 gazebo_max_freq=None gazebo_timestep=None \"\"\" If launching",
"Set the step mode of Gazebo. 1 is \"using ROS services\", 2 is",
"#------------------------------------------# # Custom methods for the CustomRobotEnv # def _check_subs_and_pubs_connection(self): \"\"\" Function to",
"model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w, reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps) \"\"\" Define publisher or subscribers as",
"like the position of the robot, at the begining of each episode. \"\"\"",
"needed. \"\"\" # self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1) # self.sub1 = rospy.Subscriber('/robot/joint_states', JointState,",
"# Custom methods for the CustomRobotEnv # def _check_subs_and_pubs_connection(self): \"\"\" Function to check",
"\"\"\" # ros_gazebo.gazebo_unpause_physics() # self._check_subs_and_pubs_connection() # ros_gazebo.gazebo_pause_physics() \"\"\" Finished __init__ method \"\"\" rospy.loginfo(\"Finished",
"can use them here if needed. # def _send_action(self, action): \"\"\" Function to",
"pkg_name=pkg_name, urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list, urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq, model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z, model_ori_x=model_ori_x, model_ori_y=model_ori_y,",
"the controllers in \"controller_list\" will be reset at the beginning of each episode,",
"\"\"\" If launching Gazebo with a custom world then set the corresponding environment",
"set some parameters, like the position of the robot, at the begining of",
"from gym.envs.registration import register from frobs_rl.envs import robot_BasicEnv import rospy #- Uncomment the",
"Define publisher or subscribers as needed. \"\"\" # self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1)",
"frobs_rl.common import ros_gazebo # from frobs_rl.common import ros_controllers # from frobs_rl.common import ros_node",
"is \"reset_world\", 2 is \"reset_simulation\". Default is 1. \"\"\" reset_mode=1 \"\"\" Set the",
"from the enviroment. \"\"\" raise NotImplementedError() def _check_if_done(self): \"\"\" Function to check if",
"steps of Gazebo to take in each episode. Default is 1. \"\"\" step_mode=1",
"Gazebo to take in each episode. Default is 1. \"\"\" step_mode=1 num_gazebo_steps=1 \"\"\"",
"# from frobs_rl.common import ros_node # from frobs_rl.common import ros_launch # from frobs_rl.common",
"if needed. # def _send_action(self, action): \"\"\" Function to send an action to",
"episode, default is False. \"\"\" reset_controllers=False \"\"\" Set the reset mode of gazebo",
"custom robot. \"\"\" def __init__(self): \"\"\" Describe the robot used in the env.",
"NotImplementedError() def _get_observation(self): \"\"\" Function to get the observation from the enviroment. \"\"\"",
"id='CustomRobotEnv-v0', # entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', # max_episode_steps=10000, # ) class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\" Custom Robot Env,",
"Function to get the reward from the enviroment. \"\"\" raise NotImplementedError() def _check_if_done(self):",
"condition then set done as: self.info['is_success'] = 1.0 \"\"\" raise NotImplementedError() def _set_episode_init_params(self):",
"action): \"\"\" Function to send an action to the robot \"\"\" raise NotImplementedError()",
"urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list, urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq, model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z, model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w,",
"enviroment. \"\"\" raise NotImplementedError() def _check_if_done(self): \"\"\" Function to check if the episode",
"each episode. Default is 1. \"\"\" step_mode=1 num_gazebo_steps=1 \"\"\" Init the parent class",
"Although it is best to register only the task environment, one can also",
"Set the reset mode of gazebo at the beginning of each episode: 1",
"# self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1) \"\"\" If using the __check_subs_and_pubs_connection method, then",
"send an action to the robot \"\"\" raise NotImplementedError() def _get_observation(self): \"\"\" Function",
"enviroment. \"\"\" raise NotImplementedError() def _get_reward(self): \"\"\" Function to get the reward from",
"model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name, urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list, urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq, model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z,",
"Init of Custom Robot env\") #------------------------------------------# # Custom methods for the CustomRobotEnv #",
"here if needed. # def _send_action(self, action): \"\"\" Function to send an action",
"\"\"\" Function to get the observation from the enviroment. \"\"\" raise NotImplementedError() def",
"\"\"\" raise NotImplementedError() def _get_reward(self): \"\"\" Function to get the reward from the",
"If using the __check_subs_and_pubs_connection method, then un-comment the lines below. \"\"\" # ros_gazebo.gazebo_unpause_physics()",
"1.0 \"\"\" raise NotImplementedError() def _set_episode_init_params(self): \"\"\" Function to set some parameters, like",
"corresponding environment variables. \"\"\" launch_gazebo=False gazebo_init_paused=True gazebo_use_gui=True gazebo_recording=False gazebo_freq=100 gazebo_max_freq=None gazebo_timestep=None \"\"\" If",
"ros_params # from frobs_rl.common import ros_urdf # from frobs_rl.common import ros_spawn \"\"\" Although",
"for all task envs using the custom robot. \"\"\" def __init__(self): \"\"\" Describe",
"robot environment. \"\"\" # register( # id='CustomRobotEnv-v0', # entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', # max_episode_steps=10000, # )",
"with a custom world then set the corresponding environment variables. \"\"\" world_path=None world_pkg=None",
"If launching Gazebo with the env then set the corresponding environment variables. \"\"\"",
"\"\"\" raise NotImplementedError() def _get_observation(self): \"\"\" Function to get the observation from the",
"to send an action to the robot \"\"\" raise NotImplementedError() def _get_observation(self): \"\"\"",
"gazebo_recording=False gazebo_freq=100 gazebo_max_freq=None gazebo_timestep=None \"\"\" If launching Gazebo with a custom world then",
"reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps) \"\"\" Define publisher or subscribers as needed. \"\"\" #",
"gazebo_timestep=None \"\"\" If launching Gazebo with a custom world then set the corresponding",
"step_mode=1 num_gazebo_steps=1 \"\"\" Init the parent class with the corresponding variables. \"\"\" super(CustomRobotEnv,",
"gazebo_init_paused=True gazebo_use_gui=True gazebo_recording=False gazebo_freq=100 gazebo_max_freq=None gazebo_timestep=None \"\"\" If launching Gazebo with a custom",
"is \"using step function of Gazebo\". Default is 1. If using the step",
"Task Env, one can use them here if needed. # def _send_action(self, action):",
"the number of steps of Gazebo to take in each episode. Default is",
"urdf_xacro_args=None rob_state_publisher_max_freq= None model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0 model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0 \"\"\" Set if",
"rospy.loginfo(\"Starting Custom Robot Env\") \"\"\" If launching Gazebo with the env then set",
"# from frobs_rl.common import ros_gazebo # from frobs_rl.common import ros_controllers # from frobs_rl.common",
"If launching Gazebo with a custom world then set the corresponding environment variables.",
"the enviroment. \"\"\" raise NotImplementedError() def _get_reward(self): \"\"\" Function to get the reward",
"it is best to implement these methods in # # the Task Env,",
"If the episode has a success condition then set done as: self.info['is_success'] =",
"# from frobs_rl.common import ros_launch # from frobs_rl.common import ros_params # from frobs_rl.common",
"step function of Gazebo\". Default is 1. If using the step mode 2",
"entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', # max_episode_steps=10000, # ) class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\" Custom Robot Env, use this",
"\"\"\" Set the reset mode of gazebo at the beginning of each episode:",
"gazebo_freq=gazebo_freq, world_path=world_path, world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name, urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file,",
"\"using step function of Gazebo\". Default is 1. If using the step mode",
"of Gazebo. 1 is \"using ROS services\", 2 is \"using step function of",
"__init__ method \"\"\" rospy.loginfo(\"Finished Init of Custom Robot env\") #------------------------------------------# # Custom methods",
"# max_episode_steps=10000, # ) class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\" Custom Robot Env, use this for",
"the step mode 2 then set the number of steps of Gazebo to",
"model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0 \"\"\" Set if the controllers in \"controller_list\" will be",
"namespace=\"/robot\" pkg_name=None urdf_file=None urdf_folder=\"/urdf\" controller_file=None controller_list=None urdf_xacro_args=None rob_state_publisher_max_freq= None model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0 model_ori_x=0.0;",
"controller_file=None controller_list=None urdf_xacro_args=None rob_state_publisher_max_freq= None model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0 model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0 \"\"\"",
"launching Gazebo with a custom world then set the corresponding environment variables. \"\"\"",
"Default is 1. \"\"\" reset_mode=1 \"\"\" Set the step mode of Gazebo. 1",
"Custom Robot Env, use this for all task envs using the custom robot.",
"1. If using the step mode 2 then set the number of steps",
"the parent class with the corresponding variables. \"\"\" super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui,",
"model_pos_y=0.0; model_pos_z=0.0 model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0 \"\"\" Set if the controllers in \"controller_list\"",
"gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path, world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name,",
"un-comment the lines below. \"\"\" # ros_gazebo.gazebo_unpause_physics() # self._check_subs_and_pubs_connection() # ros_gazebo.gazebo_pause_physics() \"\"\" Finished",
"methods for the CustomRobotEnv # # Although it is best to implement these",
"the robot using the given spawner then set the corresponding environment variables. \"\"\"",
"model_ori_w=0.0 \"\"\" Set if the controllers in \"controller_list\" will be reset at the",
"CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\" Custom Robot Env, use this for all task envs using the",
"\"\"\" Function to get the reward from the enviroment. \"\"\" raise NotImplementedError() def",
"gym import spaces from gym.envs.registration import register from frobs_rl.envs import robot_BasicEnv import rospy",
"to set some parameters, like the position of the robot, at the begining",
"launch_gazebo=False gazebo_init_paused=True gazebo_use_gui=True gazebo_recording=False gazebo_freq=100 gazebo_max_freq=None gazebo_timestep=None \"\"\" If launching Gazebo with a",
"world_filename=None \"\"\" If spawning the robot using the given spawner then set the",
"to get the observation from the enviroment. \"\"\" raise NotImplementedError() def _get_reward(self): \"\"\"",
"__check_subs_and_pubs_connection method, then un-comment the lines below. \"\"\" # ros_gazebo.gazebo_unpause_physics() # self._check_subs_and_pubs_connection() #",
"\"\"\" If launching Gazebo with the env then set the corresponding environment variables.",
"# register( # id='CustomRobotEnv-v0', # entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', # max_episode_steps=10000, # ) class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\"",
"NotImplementedError() def _check_if_done(self): \"\"\" Function to check if the episode is done. If",
"rob_state_publisher_max_freq, model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z, model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w, reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps) \"\"\"",
"def _get_reward(self): \"\"\" Function to get the reward from the enviroment. \"\"\" raise",
"envs using the custom robot. \"\"\" def __init__(self): \"\"\" Describe the robot used",
"robot used in the env. \"\"\" rospy.loginfo(\"Starting Custom Robot Env\") \"\"\" If launching",
"Gazebo. 1 is \"using ROS services\", 2 is \"using step function of Gazebo\".",
"in \"controller_list\" will be reset at the beginning of each episode, default is",
"can also register the robot environment. \"\"\" # register( # id='CustomRobotEnv-v0', # entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv',",
"Custom methods for the CustomRobotEnv # def _check_subs_and_pubs_connection(self): \"\"\" Function to check if",
"frobs_rl.common import ros_params # from frobs_rl.common import ros_urdf # from frobs_rl.common import ros_spawn",
"as: self.info['is_success'] = 1.0 \"\"\" raise NotImplementedError() def _set_episode_init_params(self): \"\"\" Function to set",
"1 is \"reset_world\", 2 is \"reset_simulation\". Default is 1. \"\"\" reset_mode=1 \"\"\" Set",
"\"\"\" super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path, world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep,",
"using the __check_subs_and_pubs_connection method, then un-comment the lines below. \"\"\" # ros_gazebo.gazebo_unpause_physics() #",
"Gazebo with a custom world then set the corresponding environment variables. \"\"\" world_path=None",
"model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w, reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps) \"\"\" Define publisher or subscribers",
"step mode 2 then set the number of steps of Gazebo to take",
"as needed. \"\"\" # self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1) # self.sub1 = rospy.Subscriber('/robot/joint_states',",
"2 is \"reset_simulation\". Default is 1. \"\"\" reset_mode=1 \"\"\" Set the step mode",
"from gym import spaces from gym.envs.registration import register from frobs_rl.envs import robot_BasicEnv import",
"the custom robot. \"\"\" def __init__(self): \"\"\" Describe the robot used in the",
"ROS services\", 2 is \"using step function of Gazebo\". Default is 1. If",
"reset at the beginning of each episode, default is False. \"\"\" reset_controllers=False \"\"\"",
"set done as: self.info['is_success'] = 1.0 \"\"\" raise NotImplementedError() def _set_episode_init_params(self): \"\"\" Function",
"parameters, like the position of the robot, at the begining of each episode.",
"of each episode, default is False. \"\"\" reset_controllers=False \"\"\" Set the reset mode",
"for the CustomRobotEnv # def _check_subs_and_pubs_connection(self): \"\"\" Function to check if the Gazebo",
"variables. \"\"\" spawn_robot=False model_name_in_gazebo=\"robot\" namespace=\"/robot\" pkg_name=None urdf_file=None urdf_folder=\"/urdf\" controller_file=None controller_list=None urdf_xacro_args=None rob_state_publisher_max_freq= None",
"ros_gazebo.gazebo_pause_physics() \"\"\" Finished __init__ method \"\"\" rospy.loginfo(\"Finished Init of Custom Robot env\") #------------------------------------------#",
"the robot used in the env. \"\"\" rospy.loginfo(\"Starting Custom Robot Env\") \"\"\" If",
"\"reset_world\", 2 is \"reset_simulation\". Default is 1. \"\"\" reset_mode=1 \"\"\" Set the step",
"self.info['is_success'] = 1.0 \"\"\" raise NotImplementedError() def _set_episode_init_params(self): \"\"\" Function to set some",
"robot_BasicEnv import rospy #- Uncomment the library modules as neeeed # from frobs_rl.common",
"each episode, default is False. \"\"\" reset_controllers=False \"\"\" Set the reset mode of",
"Although it is best to implement these methods in # # the Task",
"one can use them here if needed. # def _send_action(self, action): \"\"\" Function",
"is 1. If using the step mode 2 then set the number of",
"import ros_node # from frobs_rl.common import ros_launch # from frobs_rl.common import ros_params #",
"pkg_name=None urdf_file=None urdf_folder=\"/urdf\" controller_file=None controller_list=None urdf_xacro_args=None rob_state_publisher_max_freq= None model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0 model_ori_x=0.0; model_ori_y=0.0;",
"the beginning of each episode, default is False. \"\"\" reset_controllers=False \"\"\" Set the",
"of Custom Robot env\") #------------------------------------------# # Custom methods for the CustomRobotEnv # def",
"ready \"\"\" return True #-------------------------------------------------------# # Custom available methods for the CustomRobotEnv #",
"world_path=world_path, world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name, urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list,",
"episode. Default is 1. \"\"\" step_mode=1 num_gazebo_steps=1 \"\"\" Init the parent class with",
"model_ori_z=model_ori_z, model_ori_w=model_ori_w, reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps) \"\"\" Define publisher or subscribers as needed.",
"ros_spawn \"\"\" Although it is best to register only the task environment, one",
"self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1) # self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1) \"\"\" If",
"from frobs_rl.common import ros_launch # from frobs_rl.common import ros_params # from frobs_rl.common import",
"\"\"\" Set the step mode of Gazebo. 1 is \"using ROS services\", 2",
"CustomRobotEnv # # Although it is best to implement these methods in #",
"of gazebo at the beginning of each episode: 1 is \"reset_world\", 2 is",
"controller_list=None urdf_xacro_args=None rob_state_publisher_max_freq= None model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0 model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0 \"\"\" Set",
"\"\"\" Set if the controllers in \"controller_list\" will be reset at the beginning",
"the env. \"\"\" rospy.loginfo(\"Starting Custom Robot Env\") \"\"\" If launching Gazebo with the",
"get the reward from the enviroment. \"\"\" raise NotImplementedError() def _check_if_done(self): \"\"\" Function",
"\"\"\" Custom Robot Env, use this for all task envs using the custom",
"\"\"\" rospy.loginfo(\"Finished Init of Custom Robot env\") #------------------------------------------# # Custom methods for the",
"Gazebo with the env then set the corresponding environment variables. \"\"\" launch_gazebo=False gazebo_init_paused=True",
"\"\"\" Function to check if the Gazebo and ROS connections are ready \"\"\"",
"Uncomment the library modules as neeeed # from frobs_rl.common import ros_gazebo # from",
"def _check_if_done(self): \"\"\" Function to check if the episode is done. If the",
"the corresponding variables. \"\"\" super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path, world_pkg=world_pkg,",
"2 is \"using step function of Gazebo\". Default is 1. If using the",
"model_pos_z=model_pos_z, model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w, reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps) \"\"\" Define publisher or",
"Function to set some parameters, like the position of the robot, at the",
"spawner then set the corresponding environment variables. \"\"\" spawn_robot=False model_name_in_gazebo=\"robot\" namespace=\"/robot\" pkg_name=None urdf_file=None",
"model_name_in_gazebo=\"robot\" namespace=\"/robot\" pkg_name=None urdf_file=None urdf_folder=\"/urdf\" controller_file=None controller_list=None urdf_xacro_args=None rob_state_publisher_max_freq= None model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0",
"import ros_gazebo # from frobs_rl.common import ros_controllers # from frobs_rl.common import ros_node #",
"lines below. \"\"\" # ros_gazebo.gazebo_unpause_physics() # self._check_subs_and_pubs_connection() # ros_gazebo.gazebo_pause_physics() \"\"\" Finished __init__ method",
"has a success condition then set done as: self.info['is_success'] = 1.0 \"\"\" raise",
"gym.envs.registration import register from frobs_rl.envs import robot_BasicEnv import rospy #- Uncomment the library",
"robot using the given spawner then set the corresponding environment variables. \"\"\" spawn_robot=False",
"\"\"\" launch_gazebo=False gazebo_init_paused=True gazebo_use_gui=True gazebo_recording=False gazebo_freq=100 gazebo_max_freq=None gazebo_timestep=None \"\"\" If launching Gazebo with",
"model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0 \"\"\" Set if the controllers in \"controller_list\" will be reset",
"launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path, world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace,",
"is 1. \"\"\" reset_mode=1 \"\"\" Set the step mode of Gazebo. 1 is",
"the corresponding environment variables. \"\"\" launch_gazebo=False gazebo_init_paused=True gazebo_use_gui=True gazebo_recording=False gazebo_freq=100 gazebo_max_freq=None gazebo_timestep=None \"\"\"",
"given spawner then set the corresponding environment variables. \"\"\" spawn_robot=False model_name_in_gazebo=\"robot\" namespace=\"/robot\" pkg_name=None",
"and ROS connections are ready \"\"\" return True #-------------------------------------------------------# # Custom available methods",
"then set the number of steps of Gazebo to take in each episode.",
"step_mode=step_mode, num_gazebo_steps=num_gazebo_steps) \"\"\" Define publisher or subscribers as needed. \"\"\" # self.pub1 =",
"best to implement these methods in # # the Task Env, one can",
"env\") #------------------------------------------# # Custom methods for the CustomRobotEnv # def _check_subs_and_pubs_connection(self): \"\"\" Function",
"also register the robot environment. \"\"\" # register( # id='CustomRobotEnv-v0', # entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', #",
"function of Gazebo\". Default is 1. If using the step mode 2 then",
"register only the task environment, one can also register the robot environment. \"\"\"",
"at the beginning of each episode, default is False. \"\"\" reset_controllers=False \"\"\" Set",
"queue_size=1) # self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1) \"\"\" If using the __check_subs_and_pubs_connection method,",
"if the controllers in \"controller_list\" will be reset at the beginning of each",
"controller_file=controller_file, controller_list=controller_list, urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq, model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z, model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w, reset_controllers=reset_controllers,",
"neeeed # from frobs_rl.common import ros_gazebo # from frobs_rl.common import ros_controllers # from",
"\"\"\" # self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1) # self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1)",
"# from frobs_rl.common import ros_spawn \"\"\" Although it is best to register only",
"Gazebo and ROS connections are ready \"\"\" return True #-------------------------------------------------------# # Custom available",
"from frobs_rl.envs import robot_BasicEnv import rospy #- Uncomment the library modules as neeeed",
"reset_mode=1 \"\"\" Set the step mode of Gazebo. 1 is \"using ROS services\",",
"# ros_gazebo.gazebo_pause_physics() \"\"\" Finished __init__ method \"\"\" rospy.loginfo(\"Finished Init of Custom Robot env\")",
"rospy.loginfo(\"Finished Init of Custom Robot env\") #------------------------------------------# # Custom methods for the CustomRobotEnv",
"the Gazebo and ROS connections are ready \"\"\" return True #-------------------------------------------------------# # Custom",
"robot. \"\"\" def __init__(self): \"\"\" Describe the robot used in the env. \"\"\"",
"is \"using ROS services\", 2 is \"using step function of Gazebo\". Default is",
"world_pkg=None world_filename=None \"\"\" If spawning the robot using the given spawner then set",
"register( # id='CustomRobotEnv-v0', # entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', # max_episode_steps=10000, # ) class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\" Custom",
"env. \"\"\" rospy.loginfo(\"Starting Custom Robot Env\") \"\"\" If launching Gazebo with the env",
"ros_gazebo.gazebo_unpause_physics() # self._check_subs_and_pubs_connection() # ros_gazebo.gazebo_pause_physics() \"\"\" Finished __init__ method \"\"\" rospy.loginfo(\"Finished Init of",
"self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1) \"\"\" If using the __check_subs_and_pubs_connection method, then un-comment",
"register from frobs_rl.envs import robot_BasicEnv import rospy #- Uncomment the library modules as",
"action to the robot \"\"\" raise NotImplementedError() def _get_observation(self): \"\"\" Function to get",
"rospy.Subscriber('/robot/joint_states', JointState, self.callback1) \"\"\" If using the __check_subs_and_pubs_connection method, then un-comment the lines",
"to check if the episode is done. If the episode has a success",
"method \"\"\" rospy.loginfo(\"Finished Init of Custom Robot env\") #------------------------------------------# # Custom methods for",
"from frobs_rl.common import ros_spawn \"\"\" Although it is best to register only the",
"gazebo_use_gui=True gazebo_recording=False gazebo_freq=100 gazebo_max_freq=None gazebo_timestep=None \"\"\" If launching Gazebo with a custom world",
"Init the parent class with the corresponding variables. \"\"\" super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused,",
"use this for all task envs using the custom robot. \"\"\" def __init__(self):",
"episode: 1 is \"reset_world\", 2 is \"reset_simulation\". Default is 1. \"\"\" reset_mode=1 \"\"\"",
"ros_launch # from frobs_rl.common import ros_params # from frobs_rl.common import ros_urdf # from",
"\"\"\" Function to set some parameters, like the position of the robot, at",
"the enviroment. \"\"\" raise NotImplementedError() def _check_if_done(self): \"\"\" Function to check if the",
"reward from the enviroment. \"\"\" raise NotImplementedError() def _check_if_done(self): \"\"\" Function to check",
"set the number of steps of Gazebo to take in each episode. Default",
"the library modules as neeeed # from frobs_rl.common import ros_gazebo # from frobs_rl.common",
"frobs_rl.common import ros_spawn \"\"\" Although it is best to register only the task",
"True #-------------------------------------------------------# # Custom available methods for the CustomRobotEnv # # Although it",
"Custom available methods for the CustomRobotEnv # # Although it is best to",
"spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name, urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list, urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq, model_pos_x=model_pos_x, model_pos_y=model_pos_y,",
"# # the Task Env, one can use them here if needed. #",
"of Gazebo\". Default is 1. If using the step mode 2 then set",
"the robot \"\"\" raise NotImplementedError() def _get_observation(self): \"\"\" Function to get the observation",
"only the task environment, one can also register the robot environment. \"\"\" #",
"all task envs using the custom robot. \"\"\" def __init__(self): \"\"\" Describe the",
"for the CustomRobotEnv # # Although it is best to implement these methods",
"rob_state_publisher_max_freq= rob_state_publisher_max_freq, model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z, model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w, reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps)",
"num_gazebo_steps=1 \"\"\" Init the parent class with the corresponding variables. \"\"\" super(CustomRobotEnv, self).__init__(",
"the step mode of Gazebo. 1 is \"using ROS services\", 2 is \"using",
"corresponding environment variables. \"\"\" world_path=None world_pkg=None world_filename=None \"\"\" If spawning the robot using",
"the beginning of each episode: 1 is \"reset_world\", 2 is \"reset_simulation\". Default is",
"to the robot \"\"\" raise NotImplementedError() def _get_observation(self): \"\"\" Function to get the",
"gazebo at the beginning of each episode: 1 is \"reset_world\", 2 is \"reset_simulation\".",
"Describe the robot used in the env. \"\"\" rospy.loginfo(\"Starting Custom Robot Env\") \"\"\"",
"reset_controllers=False \"\"\" Set the reset mode of gazebo at the beginning of each",
"# the Task Env, one can use them here if needed. # def",
"in # # the Task Env, one can use them here if needed.",
"variables. \"\"\" launch_gazebo=False gazebo_init_paused=True gazebo_use_gui=True gazebo_recording=False gazebo_freq=100 gazebo_max_freq=None gazebo_timestep=None \"\"\" If launching Gazebo",
"return True #-------------------------------------------------------# # Custom available methods for the CustomRobotEnv # # Although",
"import rospy #- Uncomment the library modules as neeeed # from frobs_rl.common import",
"mode of gazebo at the beginning of each episode: 1 is \"reset_world\", 2",
"Default is 1. \"\"\" step_mode=1 num_gazebo_steps=1 \"\"\" Init the parent class with the",
"done. If the episode has a success condition then set done as: self.info['is_success']",
"it is best to register only the task environment, one can also register",
"mode 2 then set the number of steps of Gazebo to take in",
"best to register only the task environment, one can also register the robot",
"is \"reset_simulation\". Default is 1. \"\"\" reset_mode=1 \"\"\" Set the step mode of",
"frobs_rl.common import ros_controllers # from frobs_rl.common import ros_node # from frobs_rl.common import ros_launch",
"CustomRobotEnv # def _check_subs_and_pubs_connection(self): \"\"\" Function to check if the Gazebo and ROS",
"world_path=None world_pkg=None world_filename=None \"\"\" If spawning the robot using the given spawner then",
"to take in each episode. Default is 1. \"\"\" step_mode=1 num_gazebo_steps=1 \"\"\" Init",
"class with the corresponding variables. \"\"\" super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused, gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq,",
"Custom Robot env\") #------------------------------------------# # Custom methods for the CustomRobotEnv # def _check_subs_and_pubs_connection(self):",
"\"using ROS services\", 2 is \"using step function of Gazebo\". Default is 1.",
"mode of Gazebo. 1 is \"using ROS services\", 2 is \"using step function",
"If using the step mode 2 then set the number of steps of",
"_send_action(self, action): \"\"\" Function to send an action to the robot \"\"\" raise",
"some parameters, like the position of the robot, at the begining of each",
"# entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv', # max_episode_steps=10000, # ) class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\" Custom Robot Env, use",
"_get_observation(self): \"\"\" Function to get the observation from the enviroment. \"\"\" raise NotImplementedError()",
"urdf_folder=\"/urdf\" controller_file=None controller_list=None urdf_xacro_args=None rob_state_publisher_max_freq= None model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0 model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0",
"NotImplementedError() def _set_episode_init_params(self): \"\"\" Function to set some parameters, like the position of",
"one can also register the robot environment. \"\"\" # register( # id='CustomRobotEnv-v0', #",
"1. \"\"\" reset_mode=1 \"\"\" Set the step mode of Gazebo. 1 is \"using",
"ros_node # from frobs_rl.common import ros_launch # from frobs_rl.common import ros_params # from",
"gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name, urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list, urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq,",
"implement these methods in # # the Task Env, one can use them",
"def _get_observation(self): \"\"\" Function to get the observation from the enviroment. \"\"\" raise",
"position of the robot, at the begining of each episode. \"\"\" raise NotImplementedError()",
"\"\"\" Init the parent class with the corresponding variables. \"\"\" super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo,",
"environment, one can also register the robot environment. \"\"\" # register( # id='CustomRobotEnv-v0',",
"from frobs_rl.common import ros_gazebo # from frobs_rl.common import ros_controllers # from frobs_rl.common import",
"world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep, spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name, urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list, urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq=",
"beginning of each episode: 1 is \"reset_world\", 2 is \"reset_simulation\". Default is 1.",
"= rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1) # self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1) \"\"\" If using",
"Function to check if the episode is done. If the episode has a",
"\"\"\" If using the __check_subs_and_pubs_connection method, then un-comment the lines below. \"\"\" #",
"#!/bin/python3 from gym import spaces from gym.envs.registration import register from frobs_rl.envs import robot_BasicEnv",
"import ros_controllers # from frobs_rl.common import ros_node # from frobs_rl.common import ros_launch #",
"be reset at the beginning of each episode, default is False. \"\"\" reset_controllers=False",
"\"\"\" reset_mode=1 \"\"\" Set the step mode of Gazebo. 1 is \"using ROS",
"# ) class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv): \"\"\" Custom Robot Env, use this for all task",
"self.callback1) \"\"\" If using the __check_subs_and_pubs_connection method, then un-comment the lines below. \"\"\""
] |
[
"the calendar quarter following the current quarter. For example, an order placed during",
"Volatility = 'VOL' class TimeInForce(str, enum.Enum): Day = \"DAY\" # Valid for the",
"self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker, self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader, self.mifid2_execution_algo, min_version=ProtocolVersion.MIFID_EXECUTION) def deserialize(self, message:",
"None # type: int self.why_held = None # type: str self.market_cap_price = None",
"type: float self.min_commission = None # type: float self.max_commission = None # type:",
"trailing amount, etc. self.aux_price = None # type: float # extended order fields",
"= None # type: Action self.total_quantity = 0.0 self.order_type = OrderType.Market # The",
"of the first quarter of 2012. If the last day is a non-trading",
"message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location) message.add(self.continuous_update) message.add(self.reference_price_type) message.add(self.trail_stop_price) message.add(self.trailing_percent) # SCALE",
"orders had specific watermark price attribs in server version 26 # double lower",
"-1 # SMART routing only self.discretionary_amount = 0.0 self.etrade_only = True self.firm_quote_only =",
"= 0 self.outside_regular_trading_hours = False self.hidden = False self.good_after_time = None # type:",
"In all other cases specify zero. For # relative orders with no limit",
"= False self.trail_stop_price = None # type: float self.trailing_percent = None # type:",
"# EFP orders only # SCALE ORDERS ONLY self.scale_init_level_size = None # type:",
"pragma: no cover (I don't have actual examples of these) message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease, self.pegged_change_amount,",
"COMBO ORDERS ONLY self.basis_points = None # type: float # EFP orders only",
"price. Used for limit, stop-limit and relative orders. In all other cases specify",
"continue to work within the system and in the marketplace until it executes",
"self.mifid2_decision_maker = \"\" self.mifid2_decision_algo = \"\" self.mifid2_execution_trader = \"\" self.mifid2_execution_algo = \"\" updated",
"order fields self.time_in_force = TimeInForce.GoodTillCancel # for GTC orders. self.active_start_time = None #",
"= None # type: float self.opt_out_smart_routing = False # BOX exchange orders only",
"following the current quarter. # Orders submitted to IB that remain in force",
"# institutional (ie non-cleared) only self.open_close = \"O\" self.origin = OrderOrigin.Customer self.short_sale_slot =",
"these) message.add(self.algo_parameters) message.add(self.algo_id) message.add(self.what_if) message.add(\"\".join(\"%s=%s;\" % (k, v) for (k, v) in self.order_miscellaneous_options.items()))",
"\"\" self.mifid2_execution_trader = \"\" self.mifid2_execution_algo = \"\" updated = Event() # type: Event[None]",
"canceled. ImmediateOrCancel = \"IOC\" # Good until Date. It will remain working within",
"self.good_after_time = None # type: datetime.datetime self.good_till_date = None # type: datetime.datetime self.rule80a",
"hedge # Clearing info self.account = \"\" # IB account self.settling_firm = \"\"",
"be cancelled on the preceding Friday. # Orders that are modified will be",
"= None # type: str self.commission = None # type: float self.min_commission =",
"# financial advisors only self.fa_group = \"\" self.fa_profile = \"\" self.fa_method = \"\"",
"False self.hidden = False self.good_after_time = None # type: datetime.datetime self.good_till_date = None",
"= None # type: float self.scale_auto_reset = False self.scale_init_position = None # type:",
"type: int self.scale_init_fill_quantity = None # type: int self.scale_random_percent = False self.scale_table =",
"# double lower = (protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeLower;",
"int self.scale_profit_offset = None # type: float self.scale_auto_reset = False self.scale_init_position = None",
"self.basis_points = None # type: float # EFP orders only self.basis_points_type = None",
"None # type: int self.scale_profit_offset = None # type: float self.scale_auto_reset = False",
"type: int # 1=Average, 2 = BidOrAsk # COMBO ORDERS ONLY self.basis_points =",
"False self.scale_table = \"\" # # HEDGE ORDERS self.hedge_type = \"\" # 'D'",
"combination of the two. GoodTillCancel = \"GTC\" # Immediate or Cancel. Any portion",
"\"\" self.oca_type = 0 self.order_reference = \"\" self.transmit = True self.parent_id = 0",
"\"\" # institutional (ie non-cleared) only self.open_close = \"O\" self.origin = OrderOrigin.Customer self.short_sale_slot",
"# Volatility orders (srv v26 and above) message.add(self.volatility) message.add(self.volatility_type) message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price) if self.delta_neutral_order_type:",
"= None # type: str self.market_cap_price = None # type: float self.order_ref =",
"None # type: datetime.datetime self.active_stop_time = None # type: datetime.datetime self.oca_group = \"\"",
"OrderType.Market # The LIMIT price. Used for limit, stop-limit and relative orders. In",
"float self.adjusted_stop_price = 1.7976931348623157e+308 # type: float self.adjusted_stop_limit_price = 1.7976931348623157e+308 # type: float",
"= \"GTD\" Opening = \"OPG\" # Use OPG to send a market-on-open (MOO)",
"a market-on-open (MOO) or limit-on-open (LOO) self # If the entire Fill-or-Kill order",
"it becomes available in the market is canceled. ImmediateOrCancel = \"IOC\" # Good",
"self.commission_currency = None # type: str self.warning_text = None # type: str self.order_id",
"'PEG MID' PeggedToMarket = 'PEG MKT' PeggedToStock = 'PEG STK' PeggedToBenchmark = 'PEG",
"is a non-trading day, # the cancellation will occur at the close of",
"the end of the calendar quarter following the current quarter. For example, an",
"message.add(self.block_order) message.add(self.sweep_to_fill) message.add(self.display_size) message.add(self.trigger_method) message.add(self.outside_regular_trading_hours) message.add(self.hidden) assert self.instrument.security_type != 'BAG' # not supported",
"self.trigger_method = 0 self.outside_regular_trading_hours = False self.hidden = False self.good_after_time = None #",
"Cancel. Any portion that is not filled as soon as it becomes available",
"Event() # type: Event[None] on_execution = Event() # type: Event[execution.Execution] def serialize(self, message:",
"only self.open_close = \"O\" self.origin = OrderOrigin.Customer self.short_sale_slot = 0 self.designated_location = \"\"",
"# type: str self.equity_with_loan = None # type: str self.commission = None #",
"AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT message.add(self.starting_price) message.add(self.stock_ref_price) message.add(self.delta) # Volatility orders had specific watermark price attribs",
"suppored yet self.conditions_cancel_order = False self.conditions_ignore_regular_trading_hours = False self.ext_operator = \"\" self.soft_dollar_tier_name =",
"0.0 self.is_pegged_change_amount_decrease = False self.reference_change_amount = 0.0 self.reference_exchange_id = \"\" self.adjusted_order_type = OrderType.Undefined",
"# True beneficiary of the order self.clearing_intent = \"\" # \"\" (Default), \"IB\",",
"AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT message.add(self.starting_price) message.add(self.stock_ref_price) message.add(self.delta) # Volatility orders had specific watermark price",
"Annual = 2 class OrderType(str, enum.Enum): Undefined = \"\" BoxTop = 'BOX TOP'",
"be reduced for dividends. To allow # adjustment to your order price on",
"above) message.add(self.short_sale_slot) # # 0 for retail, 1 or 2 for institutions message.add(self.designated_location)",
"# type: float self.stock_ref_price = None # type: float self.delta = None #",
"= 0 self.pegged_change_amount = 0.0 self.is_pegged_change_amount_decrease = False self.reference_change_amount = 0.0 self.reference_exchange_id =",
"message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location) message.add(self.continuous_update) message.add(self.reference_price_type) message.add(self.trail_stop_price) message.add(self.trailing_percent) # SCALE orders message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size)",
"= 1 Annual = 2 class OrderType(str, enum.Enum): Undefined = \"\" BoxTop =",
"1.7976931348623157e+308 # type: float self.adjusted_stop_limit_price = 1.7976931348623157e+308 # type: float self.adjusted_trailing_amount = 1.7976931348623157e+308",
"0 self.conditions = [] # type: typing.List[None] # not suppored yet self.conditions_cancel_order =",
"don't have actual examples of these) message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot)",
"self.what_if = False # Not Held self.not_held = False self.solicited = False self.model_code",
"float self.volatility_type = None # type: VolatilityType self.delta_neutral_order_type = \"\" self.delta_neutral_aux_price = None",
"in server version 26 # double lower = (protocol_version == 26 && isVolOrder)",
"have actual examples of these) message.add(self.algo_parameters) message.add(self.algo_id) message.add(self.what_if) message.add(\"\".join(\"%s=%s;\" % (k, v) for",
"self.stock_range_upper = None # type: float self.randomize_price = False self.randomize_size = False #",
"filled as soon as it becomes available in the market is canceled. ImmediateOrCancel",
"self.solicited = False self.model_code = \"\" self.order_miscellaneous_options = {} # type: typing.Dict[str, str]",
"TimeInForce(str, enum.Enum): Day = \"DAY\" # Valid for the day only. # Good",
"= 1.7976931348623157e+308 # type: float self.limit_price_offset = 1.7976931348623157e+308 # type: float self.adjusted_stop_price =",
"is Sunday, the orders will be cancelled on the preceding Friday. # Orders",
"remain in force for more than one day will not be reduced for",
"type: int self.last_fill_price = None # type: float self.client_id = None # type:",
"TimeInForce.GoodTillCancel # for GTC orders. self.active_start_time = None # type: datetime.datetime self.active_stop_time =",
"? DBL_MAX : selfstockRangeLower; # double upper = (protocol_version == 26 && isVolOrder)",
"(Default), \"IB\", \"Away\", \"PTA\" (PostTrade) # ALGO ORDERS ONLY self.algo_strategy = \"\" self.algo_parameters",
"BoxTop = 'BOX TOP' Limit = 'LMT' LimitIfTouched = 'LIT' LimitOnClose = 'LOC'",
"Match = 1 Improvement = 2 Transparent = 3 class Action(str, enum.Enum): Buy",
"order type, or a combination of the two. GoodTillCancel = \"GTC\" # Immediate",
"noqa from ib_async.errors import UnsupportedFeature from ib_async.event import Event from ib_async import execution",
"\"\" updated = Event() # type: Event[None] on_execution = Event() # type: Event[execution.Execution]",
"# type: str self.filled = None # type: float self.remaining = None #",
"# type: Event[execution.Execution] def serialize(self, message: OutgoingMessage): message.add(self.order_id) message.add(self.instrument) if self.instrument.security_ids: security_id_type, security_id",
"self.adjustable_trailing_unit = 0 self.conditions = [] # type: typing.List[None] # not suppored yet",
"message.add(self.open_close) message.add(self.origin) message.add(self.order_reference) message.add(self.transmit) message.add(self.parent_id) message.add(self.block_order) message.add(self.sweep_to_fill) message.add(self.display_size) message.add(self.trigger_method) message.add(self.outside_regular_trading_hours) message.add(self.hidden) assert self.instrument.security_type",
"dividends. To allow # adjustment to your order price on ex-dividend date, consider",
"day is a non-trading day, # the cancellation will occur at the close",
"(protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeLower; # double upper =",
"&& isVolOrder) ? DBL_MAX : selfstockRangeLower; # double upper = (protocol_version == 26",
"results in a stock split (forward or reverse), exchange for shares, or distribution",
"False # Not Held self.not_held = False self.solicited = False self.model_code = \"\"",
"type: int self.parent_id = None # type: int self.last_fill_price = None # type:",
"int # 1=Average, 2 = BidOrAsk # COMBO ORDERS ONLY self.basis_points = None",
"self.stock_ref_price = None # type: float self.delta = None # type: float #",
"Good until canceled. The order will continue to work within the system and",
"using a Good-Til-Date/Time (GTD) or # Good-after-Time/Date (GAT) order type, or a combination",
"self.active_stop_time = None # type: datetime.datetime self.oca_group = \"\" self.oca_type = 0 self.order_reference",
"# type: float self.adjustable_trailing_unit = 0 self.conditions = [] # type: typing.List[None] #",
"orders only # SCALE ORDERS ONLY self.scale_init_level_size = None # type: int self.scale_subs_level_size",
"= None # type: int self.scale_init_fill_quantity = None # type: int self.scale_random_percent =",
"\"\" self.delta_neutral_open_close = \"\" self.delta_neutral_short_sale = False self.delta_neutral_short_sale_slot = 0 self.delta_neutral_designated_location = \"\"",
"SLONG = 'SLONG' class VolatilityType(enum.Enum): Daily = 1 Annual = 2 class OrderType(str,",
"None # type: float self.scale_auto_reset = False self.scale_init_position = None # type: int",
"Orders submitted to IB that remain in force for more than one day",
"= None # type: VolatilityType self.delta_neutral_order_type = \"\" self.delta_neutral_aux_price = None # type:",
"actual examples of these) message.add(self.instrument.underlying_component) message.add(self.algo_strategy) if self.algo_strategy: # pragma: no cover (I",
"canceled. The order will continue to work within the system and in the",
"current quarter. # Orders submitted to IB that remain in force for more",
"conditions: If a corporate # action on a security results in a stock",
"self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader, self.mifid2_execution_algo, min_version=ProtocolVersion.MIFID_EXECUTION) def deserialize(self, message: IncomingMessage): assert False, \"Implemented in",
"or reverse), exchange for shares, or distribution of # shares. If you do",
"# pragma: no cover (Not implemented) raise UnsupportedFeature(\"Order conditions\") message.add(0, # no conditions",
"message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location) message.add(self.continuous_update) message.add(self.reference_price_type) message.add(self.trail_stop_price) message.add(self.trailing_percent) # SCALE orders message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size) message.add(self.scale_price_increment) if",
"is canceled. GTC orders will be automatically be cancelled under the following conditions:",
"float self.scale_price_adjust_interval = None # type: int self.scale_profit_offset = None # type: float",
"# type: typing.Dict[str, str] self.algo_id = \"\" # What-if self.what_if = False #",
"Expire” date consistent with the end of the calendar # quarter following the",
"self.ext_operator = \"\" self.soft_dollar_tier_name = \"\" self.soft_dollar_tier_value = \"\" self.soft_dollar_tier_display_name = \"\" #",
"reduced for dividends. To allow # adjustment to your order price on ex-dividend",
"log into your IB account for 90 days. # At the end of",
"portion that is not filled as soon as it becomes available in the",
"Fill-or-Kill order does not execute as soon as it becomes available, the entire",
"= 0 self.perm_id = 0 # main order fields self.action = None #",
"self.clearing_account = \"\" # True beneficiary of the order self.clearing_intent = \"\" #",
"(I don't have actual examples of these) message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale)",
"first quarter of 2012. If the last day is a non-trading day, #",
"# adjustment to your order price on ex-dividend date, consider using a Good-Til-Date/Time",
"above) message.add(self.volatility) message.add(self.volatility_type) message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price) if self.delta_neutral_order_type: # pragma: no cover (I don't",
"VolatilityType(enum.Enum): Daily = 1 Annual = 2 class OrderType(str, enum.Enum): Undefined = \"\"",
"# type: VolatilityType self.delta_neutral_order_type = \"\" self.delta_neutral_aux_price = None # type: float self.delta_neutral_contract_id",
"# VOLATILITY ORDERS ONLY self.volatility = None # type: float self.volatility_type = None",
"noqa from ib_async.instrument import Instrument # noqa from ib_async.protocol import ProtocolInterface, Serializable, ProtocolVersion,",
"implemented) raise UnsupportedFeature(\"Order conditions\") message.add(0, # no conditions self.adjusted_order_type, self.trigger_price, self.limit_price_offset, self.adjusted_stop_price, self.adjusted_stop_limit_price,",
"cover (I don't have actual examples of these) message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close)",
"= AuctionStrategy.Unset self.starting_price = None # type: float self.stock_ref_price = None # type:",
"corporate # action on a security results in a stock split (forward or",
"class VolatilityType(enum.Enum): Daily = 1 Annual = 2 class OrderType(str, enum.Enum): Undefined =",
"executes or until the # close of the market on the date specified",
"\"DAY\" # Valid for the day only. # Good until canceled. The order",
"= None # type: float self.stock_ref_price = None # type: float self.delta =",
"= \"\" # IB account self.settling_firm = \"\" self.clearing_account = \"\" # True",
"only. # Good until canceled. The order will continue to work within the",
"stop price for STP LMT orders, trailing amount, etc. self.aux_price = None #",
"self.scale_price_adjust_interval = None # type: int self.scale_profit_offset = None # type: float self.scale_auto_reset",
"Action self.total_quantity = 0.0 self.order_type = OrderType.Market # The LIMIT price. Used for",
"= False self.scale_init_position = None # type: int self.scale_init_fill_quantity = None # type:",
"\"\" BoxTop = 'BOX TOP' Limit = 'LMT' LimitIfTouched = 'LIT' LimitOnClose =",
"message.add(self.display_size) message.add(self.trigger_method) message.add(self.outside_regular_trading_hours) message.add(self.hidden) assert self.instrument.security_type != 'BAG' # not supported message.add(\"\") #",
"# type: int self.scale_price_increment = None # type: float self.scale_price_adjust_value = None #",
"no cover (I don't have actual examples of these) message.add(self.instrument.underlying_component) message.add(self.algo_strategy) if self.algo_strategy:",
"saleslot data (srv v18 and above) message.add(self.short_sale_slot) # # 0 for retail, 1",
"messages self.status = None # type: str self.filled = None # type: float",
"two. GoodTillCancel = \"GTC\" # Immediate or Cancel. Any portion that is not",
"'BOX TOP' Limit = 'LMT' LimitIfTouched = 'LIT' LimitOnClose = 'LOC' Market =",
"only self.stock_range_lower = None # type: float self.stock_range_upper = None # type: float",
"for STP LMT orders, trailing amount, etc. self.aux_price = None # type: float",
"market is canceled. ImmediateOrCancel = \"IOC\" # Good until Date. It will remain",
"don't have actual examples of these) message.add(self.instrument.underlying_component) message.add(self.algo_strategy) if self.algo_strategy: # pragma: no",
"short saleslot data (srv v18 and above) message.add(self.short_sale_slot) # # 0 for retail,",
"self.oca_group = \"\" self.oca_type = 0 self.order_reference = \"\" self.transmit = True self.parent_id",
"self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker, self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader,",
"# for GTC orders. self.active_start_time = None # type: datetime.datetime self.active_stop_time = None",
"message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent) message.add(self.scale_table) message.add(self.active_start_time) message.add(self.active_stop_time) # HEDGE orders message.add(self.hedge_type) if self.hedge_type: #",
"message.add(self.all_or_none) message.add(self.min_quantity) message.add(self.percent_offset) message.add(self.etrade_only) message.add(self.firm_quote_only) message.add(self.nbbo_price_cap) message.add(self.auction_strategy) # AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT message.add(self.starting_price) message.add(self.stock_ref_price)",
"= \"\" # True beneficiary of the order self.clearing_intent = \"\" # \"\"",
"for institutions message.add(self.designated_location) # # populate only when shortSaleSlot = 2. message.add(self.exempt_code) message.add(self.oca_type)",
"type: float self.scale_auto_reset = False self.scale_init_position = None # type: int self.scale_init_fill_quantity =",
"have actual examples of these) message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease, self.pegged_change_amount, self.reference_change_amount, self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if self.conditions:",
"one day will not be reduced for dividends. To allow # adjustment to",
"'REL' RelativeLimitCombo = 'REL + LMT' RelativeMarketCombo = 'REL + MKT' Stop =",
"# type: typing.Dict[str, str] self.reference_contract_id = 0 self.pegged_change_amount = 0.0 self.is_pegged_change_amount_decrease = False",
"cancelled on the preceding Friday. # Orders that are modified will be assigned",
"# type: float self.scale_price_adjust_value = None # type: float self.scale_price_adjust_interval = None #",
"# type: float self.randomize_price = False self.randomize_size = False # VOLATILITY ORDERS ONLY",
"False self.scale_init_position = None # type: int self.scale_init_fill_quantity = None # type: int",
"Date. It will remain working within the system and in the marketplace until",
"BENCH\": # pragma: no cover (I don't have actual examples of these) message.add(self.reference_contract_id,",
"contain the stop price for STP LMT orders, trailing amount, etc. self.aux_price =",
"message.add(self.delta) # Volatility orders had specific watermark price attribs in server version 26",
"OrderOrigin(enum.Enum): Customer = 0 Firm = 1 Unknown = 2 class AuctionStrategy(enum.Enum): Unset",
"LMT\" StopWithProtection = \"STP PRT\" TrailingStop = \"TRAIL\" TrailingStopLimit = \"TRAIL LIMIT\" Volatility",
"# type: datetime.datetime self.rule80a = \"\" self.all_or_none = False self.min_quantity = None #",
"also specify zero. self.limit_price = None # type: float # Generic field to",
"= \"\" self.soft_dollar_tier_name = \"\" self.soft_dollar_tier_value = \"\" self.soft_dollar_tier_display_name = \"\" # native",
"Valid for the day only. # Good until canceled. The order will continue",
"becomes available in the market is canceled. ImmediateOrCancel = \"IOC\" # Good until",
"sharesAllocation field message.add(self.discretionary_amount) message.add(self.good_after_time) message.add(self.good_till_date) message.add(self.fa_group) message.add(self.fa_method) message.add(self.fa_percentage) message.add(self.fa_profile) message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT) # institutional",
"float self.stock_range_upper = None # type: float self.randomize_price = False self.randomize_size = False",
"__init__(self, parent: ProtocolInterface) -> None: self._parent = parent self.instrument = None # type:",
"self.min_commission = None # type: float self.max_commission = None # type: float self.commission_currency",
"False self.min_quantity = None # type: int self.percent_offset = None # type: float",
"order is canceled. FillOrKill = \"FOK\" DayTillCancel = \"DTC\" # Day until Canceled",
"= \"\" self.continuous_update = False self.reference_price_type = None # type: int # 1=Average,",
"execution # noqa from ib_async.instrument import Instrument # noqa from ib_async.protocol import ProtocolInterface,",
"None # type: datetime.datetime self.rule80a = \"\" self.all_or_none = False self.min_quantity = None",
"the market on the date specified GoodTillDate = \"GTD\" Opening = \"OPG\" #",
"until Canceled class Order(Serializable): def __init__(self, parent: ProtocolInterface) -> None: self._parent = parent",
"an order placed during the third # quarter of 2011 will be canceled",
"0 self.delta_neutral_settling_firm = \"\" self.delta_neutral_clearing_account = \"\" self.delta_neutral_clearing_intent = \"\" self.delta_neutral_open_close = \"\"",
"example, if the last day # of the quarter is Sunday, the orders",
"# main order fields self.action = None # type: Action self.total_quantity = 0.0",
"last day # of the quarter is Sunday, the orders will be cancelled",
"message.add(self.algo_strategy) if self.algo_strategy: # pragma: no cover (I don't have actual examples of",
"ONLY self.basis_points = None # type: float # EFP orders only self.basis_points_type =",
"self.fa_method = \"\" self.fa_percentage = \"\" # institutional (ie non-cleared) only self.open_close =",
"> 0.0: # pragma: no cover (I don't have actual examples of these)",
"# type: int self.percent_offset = None # type: float self.override_percentage_constraints = False self.trail_stop_price",
"if self.instrument.underlying_component: # pragma: no cover (I don't have actual examples of these)",
"don't have actual examples of these) message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease, self.pegged_change_amount, self.reference_change_amount, self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if",
"Sunday, the orders will be cancelled on the preceding Friday. # Orders that",
"False self.model_code = \"\" self.order_miscellaneous_options = {} # type: typing.Dict[str, str] self.reference_contract_id =",
"= \"\" self.clearing_account = \"\" # True beneficiary of the order self.clearing_intent =",
"to contain the stop price for STP LMT orders, trailing amount, etc. self.aux_price",
"# type: float self.volatility_type = None # type: VolatilityType self.delta_neutral_order_type = \"\" self.delta_neutral_aux_price",
"self.soft_dollar_tier_value = \"\" self.soft_dollar_tier_display_name = \"\" # native cash quantity self.cash_quantity = 1.7976931348623157e+308",
"retail, 1 or 2 for institutions message.add(self.designated_location) # # populate only when shortSaleSlot",
"following the current quarter. For example, an order placed during the third #",
"None: self._parent = parent self.instrument = None # type: Instrument # Filled by",
"LimitIfTouched = 'LIT' LimitOnClose = 'LOC' Market = 'MKT' MarketIfTouched = 'MIT' MarketOnClose",
"self.order_type == \"PEG BENCH\": # pragma: no cover (I don't have actual examples",
"message.add(self.instrument) if self.instrument.security_ids: security_id_type, security_id = next(iter(self.instrument.security_ids.items())) else: security_id_type = security_id = None",
"info self.account = \"\" # IB account self.settling_firm = \"\" self.clearing_account = \"\"",
"'MKT PRT' PassiveRelative = 'PASSV REL' PeggedToMidpoint = 'PEG MID' PeggedToMarket = 'PEG",
"# type: float self.scale_auto_reset = False self.scale_init_position = None # type: int self.scale_init_fill_quantity",
"\"\" # \"\" (Default), \"IB\", \"Away\", \"PTA\" (PostTrade) # ALGO ORDERS ONLY self.algo_strategy",
"import Event from ib_async import execution # noqa from ib_async.instrument import Instrument #",
"= \"GTC\" # Immediate or Cancel. Any portion that is not filled as",
"= 0 self.client_id = 0 self.perm_id = 0 # main order fields self.action",
"0 # main order fields self.action = None # type: Action self.total_quantity =",
"quarter. For example, an order placed during the third # quarter of 2011",
"= 0.0 self.reference_exchange_id = \"\" self.adjusted_order_type = OrderType.Undefined self.trigger_price = 1.7976931348623157e+308 # type:",
"message.add(self.scale_table) message.add(self.active_start_time) message.add(self.active_stop_time) # HEDGE orders message.add(self.hedge_type) if self.hedge_type: # pragma: no cover",
": selfstockRangeLower; # double upper = (protocol_version == 26 && isVolOrder) ? DBL_MAX",
"type: int self.why_held = None # type: str self.market_cap_price = None # type:",
"cover (I don't have actual examples of these) message.add(self.instrument.underlying_component) message.add(self.algo_strategy) if self.algo_strategy: #",
"= False self.display_size = 0 self.trigger_method = 0 self.outside_regular_trading_hours = False self.hidden =",
"to stock and VOL orders only self.stock_range_lower = None # type: float self.stock_range_upper",
"self.order_type = OrderType.Market # The LIMIT price. Used for limit, stop-limit and relative",
"new “Auto Expire” date consistent with the end of the calendar # quarter",
"None # type: float # EFP orders only self.basis_points_type = None # type:",
"self.conditions: # pragma: no cover (Not implemented) raise UnsupportedFeature(\"Order conditions\") message.add(0, # no",
"= None # type: datetime.datetime self.oca_group = \"\" self.oca_type = 0 self.order_reference =",
"self.mifid2_execution_trader = \"\" self.mifid2_execution_algo = \"\" updated = Event() # type: Event[None] on_execution",
"message.add(self.nbbo_price_cap) message.add(self.auction_strategy) # AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT message.add(self.starting_price) message.add(self.stock_ref_price) message.add(self.delta) # Volatility orders had",
"HEDGE ORDERS self.hedge_type = \"\" # 'D' - delta, 'B' - beta, 'F'",
"marketplace until it executes # or is canceled. GTC orders will be automatically",
"= 1.7976931348623157e+308 # type: float self.adjusted_trailing_amount = 1.7976931348623157e+308 # type: float self.adjustable_trailing_unit =",
"message.add(self.discretionary_amount) message.add(self.good_after_time) message.add(self.good_till_date) message.add(self.fa_group) message.add(self.fa_method) message.add(self.fa_percentage) message.add(self.fa_profile) message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT) # institutional short saleslot",
"self.designated_location = \"\" self.exempt_code = -1 # SMART routing only self.discretionary_amount = 0.0",
"# noqa from ib_async.instrument import Instrument # noqa from ib_async.protocol import ProtocolInterface, Serializable,",
"True self.firm_quote_only = True self.nbbo_price_cap = None # type: float self.opt_out_smart_routing = False",
"None # type: str self.equity_with_loan = None # type: str self.commission = None",
"= OrderType.Undefined self.trigger_price = 1.7976931348623157e+308 # type: float self.limit_price_offset = 1.7976931348623157e+308 # type:",
"# type: str self.order_id = 0 self.client_id = 0 self.perm_id = 0 #",
"fields self.action = None # type: Action self.total_quantity = 0.0 self.order_type = OrderType.Market",
"= False self.good_after_time = None # type: datetime.datetime self.good_till_date = None # type:",
"None # type: float # Generic field to contain the stop price for",
"from ib_async import execution # noqa from ib_async.instrument import Instrument # noqa from",
"= OrderOrigin.Customer self.short_sale_slot = 0 self.designated_location = \"\" self.exempt_code = -1 # SMART",
"these) message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset) message.add(self.scale_auto_reset) message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent) message.add(self.scale_table) message.add(self.active_start_time) message.add(self.active_stop_time) # HEDGE",
"FillOrKill = \"FOK\" DayTillCancel = \"DTC\" # Day until Canceled class Order(Serializable): def",
"class OrderOrigin(enum.Enum): Customer = 0 Firm = 1 Unknown = 2 class AuctionStrategy(enum.Enum):",
"(k, v) for (k, v) in self.order_miscellaneous_options.items())) message.add(self.solicited) message.add(self.randomize_size) message.add(self.randomize_price) if self.order_type ==",
"examples of these) message.add(self.hedge_param) message.add(self.opt_out_smart_routing) message.add(self.clearing_account) message.add(self.clearing_intent) message.add(self.not_held) message.add(bool(self.instrument.underlying_component)) if self.instrument.underlying_component: # pragma:",
"self.starting_price = None # type: float self.stock_ref_price = None # type: float self.delta",
"message.add(self.stock_ref_price) message.add(self.delta) # Volatility orders had specific watermark price attribs in server version",
"quarter following the current quarter. For example, an order placed during the third",
"RelativeMarketCombo = 'REL + MKT' Stop = \"STP\" StopLimit = \"STP LMT\" StopWithProtection",
"and above) message.add(self.volatility) message.add(self.volatility_type) message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price) if self.delta_neutral_order_type: # pragma: no cover (I",
"on a security results in a stock split (forward or reverse), exchange for",
"None # type: float self.override_percentage_constraints = False self.trail_stop_price = None # type: float",
"= 'REL + LMT' RelativeMarketCombo = 'REL + MKT' Stop = \"STP\" StopLimit",
"pragma: no cover (I don't have actual examples of these) message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account)",
"self.good_till_date = None # type: datetime.datetime self.rule80a = \"\" self.all_or_none = False self.min_quantity",
"0 self.order_reference = \"\" self.transmit = True self.parent_id = 0 self.block_order = False",
"self.trail_stop_price = None # type: float self.trailing_percent = None # type: float #",
"message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT) # institutional short saleslot data (srv v18 and above) message.add(self.short_sale_slot) #",
"message.add(self.volatility) message.add(self.volatility_type) message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price) if self.delta_neutral_order_type: # pragma: no cover (I don't have",
"GoodTillDate = \"GTD\" Opening = \"OPG\" # Use OPG to send a market-on-open",
"= 'MOC' MarketToLimit = 'MTL' MarketWithProtection = 'MKT PRT' PassiveRelative = 'PASSV REL'",
"with the end of the calendar # quarter following the current quarter. #",
"type: float self.delta_neutral_contract_id = 0 self.delta_neutral_settling_firm = \"\" self.delta_neutral_clearing_account = \"\" self.delta_neutral_clearing_intent =",
"on_execution = Event() # type: Event[execution.Execution] def serialize(self, message: OutgoingMessage): message.add(self.order_id) message.add(self.instrument) if",
"None # type: str self.order_id = 0 self.client_id = 0 self.perm_id = 0",
"typing.Dict[str, str] self.algo_id = \"\" # What-if self.what_if = False # Not Held",
"(PostTrade) # ALGO ORDERS ONLY self.algo_strategy = \"\" self.algo_parameters = {} # type:",
"have actual examples of these) message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location)",
"that is not filled as soon as it becomes available in the market",
"class AuctionStrategy(enum.Enum): Unset = 0 Match = 1 Improvement = 2 Transparent =",
"self.instrument.underlying_component: # pragma: no cover (I don't have actual examples of these) message.add(self.instrument.underlying_component)",
"False self.conditions_ignore_regular_trading_hours = False self.ext_operator = \"\" self.soft_dollar_tier_name = \"\" self.soft_dollar_tier_value = \"\"",
"= \"\" self.fa_profile = \"\" self.fa_method = \"\" self.fa_percentage = \"\" # institutional",
"message.add(self.stock_range_lower) message.add(self.stock_range_upper) message.add(self.override_percentage_constraints) # Volatility orders (srv v26 and above) message.add(self.volatility) message.add(self.volatility_type) message.add(self.delta_neutral_order_type)",
"self.scale_price_increment and self.scale_price_increment > 0.0: # pragma: no cover (I don't have actual",
"# type: int self.why_held = None # type: str self.market_cap_price = None #",
"float self.perm_id = None # type: int self.parent_id = None # type: int",
"self.adjusted_order_type = OrderType.Undefined self.trigger_price = 1.7976931348623157e+308 # type: float self.limit_price_offset = 1.7976931348623157e+308 #",
"IB account for 90 days. # At the end of the calendar quarter",
"day, # the cancellation will occur at the close of the final trading",
"Good until Date. It will remain working within the system and in the",
"STK' PeggedToBenchmark = 'PEG BENCH' Relative = 'REL' RelativeLimitCombo = 'REL + LMT'",
"price attribs in server version 26 # double lower = (protocol_version == 26",
"int # EFP orders only # SCALE ORDERS ONLY self.scale_init_level_size = None #",
"= \"\" self.adjusted_order_type = OrderType.Undefined self.trigger_price = 1.7976931348623157e+308 # type: float self.limit_price_offset =",
"= 0 self.conditions = [] # type: typing.List[None] # not suppored yet self.conditions_cancel_order",
"will occur at the close of the final trading day of that quarter.",
"no cover (I don't have actual examples of these) message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease, self.pegged_change_amount, self.reference_change_amount,",
"# EFP orders only self.basis_points_type = None # type: int # EFP orders",
"will be canceled at the end of the first quarter of 2012. If",
"\"\" # What-if self.what_if = False # Not Held self.not_held = False self.solicited",
"None # type: float self.max_commission = None # type: float self.commission_currency = None",
"None # type: int self.scale_random_percent = False self.scale_table = \"\" # # HEDGE",
"1.7976931348623157e+308 # type: float self.adjustable_trailing_unit = 0 self.conditions = [] # type: typing.List[None]",
"= False self.delta_neutral_short_sale_slot = 0 self.delta_neutral_designated_location = \"\" self.continuous_update = False self.reference_price_type =",
"self.stock_range_lower = None # type: float self.stock_range_upper = None # type: float self.randomize_price",
"# of the quarter is Sunday, the orders will be cancelled on the",
"Instrument # noqa from ib_async.protocol import ProtocolInterface, Serializable, ProtocolVersion, IncomingMessage, OutgoingMessage class OrderOrigin(enum.Enum):",
"message.add(0, # no conditions self.adjusted_order_type, self.trigger_price, self.limit_price_offset, self.adjusted_stop_price, self.adjusted_stop_limit_price, self.adjusted_trailing_amount, self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator,",
"= 'BOX TOP' Limit = 'LMT' LimitIfTouched = 'LIT' LimitOnClose = 'LOC' Market",
"float self.randomize_price = False self.randomize_size = False # VOLATILITY ORDERS ONLY self.volatility =",
"# institutional short saleslot data (srv v18 and above) message.add(self.short_sale_slot) # # 0",
"= 'SLONG' class VolatilityType(enum.Enum): Daily = 1 Annual = 2 class OrderType(str, enum.Enum):",
"= \"\" # 'D' - delta, 'B' - beta, 'F' - FX, 'P'",
"True beneficiary of the order self.clearing_intent = \"\" # \"\" (Default), \"IB\", \"Away\",",
"field message.add(self.discretionary_amount) message.add(self.good_after_time) message.add(self.good_till_date) message.add(self.fa_group) message.add(self.fa_method) message.add(self.fa_percentage) message.add(self.fa_profile) message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT) # institutional short",
"self.adjusted_stop_limit_price, self.adjusted_trailing_amount, self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker, self.mifid2_decision_algo,",
"non-trading day, # the cancellation will occur at the close of the final",
"# 'D' - delta, 'B' - beta, 'F' - FX, 'P' - pair",
"not filled as soon as it becomes available in the market is canceled.",
"\"\" self.algo_parameters = {} # type: typing.Dict[str, str] self.smart_combo_routing_params = {} # type:",
"1 Annual = 2 class OrderType(str, enum.Enum): Undefined = \"\" BoxTop = 'BOX",
"LimitOnClose = 'LOC' Market = 'MKT' MarketIfTouched = 'MIT' MarketOnClose = 'MOC' MarketToLimit",
"quarter of 2011 will be canceled at the end of the first quarter",
"the stop price for STP LMT orders, trailing amount, etc. self.aux_price = None",
"self.equity_with_loan = None # type: str self.commission = None # type: float self.min_commission",
"VolatilityType self.delta_neutral_order_type = \"\" self.delta_neutral_aux_price = None # type: float self.delta_neutral_contract_id = 0",
"type: str self.inital_margin = None # type: str self.maintenance_margin = None # type:",
"0.0 self.order_type = OrderType.Market # The LIMIT price. Used for limit, stop-limit and",
"until the # close of the market on the date specified GoodTillDate =",
"orders message.add(self.hedge_type) if self.hedge_type: # pragma: no cover (I don't have actual examples",
"# not supported message.add(\"\") # deprecated sharesAllocation field message.add(self.discretionary_amount) message.add(self.good_after_time) message.add(self.good_till_date) message.add(self.fa_group) message.add(self.fa_method)",
"'beta=X' value for beta hedge, 'ratio=Y' for pair hedge # Clearing info self.account",
"the last day # of the quarter is Sunday, the orders will be",
"etc. self.aux_price = None # type: float # extended order fields self.time_in_force =",
"\"\" self.fa_percentage = \"\" # institutional (ie non-cleared) only self.open_close = \"O\" self.origin",
"str self.equity_with_loan = None # type: str self.commission = None # type: float",
"relative orders with no limit price, also specify zero. self.limit_price = None #",
"specified GoodTillDate = \"GTD\" Opening = \"OPG\" # Use OPG to send a",
"None # type: int # 1=Average, 2 = BidOrAsk # COMBO ORDERS ONLY",
"self.scale_init_fill_quantity = None # type: int self.scale_random_percent = False self.scale_table = \"\" #",
"self.is_pegged_change_amount_decrease, self.pegged_change_amount, self.reference_change_amount, self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if self.conditions: # pragma: no cover (Not implemented)",
"self.perm_id = 0 # main order fields self.action = None # type: Action",
"HEDGE orders message.add(self.hedge_type) if self.hedge_type: # pragma: no cover (I don't have actual",
"str self.combo_legs_description = None # type: str self.inital_margin = None # type: str",
"# Use OPG to send a market-on-open (MOO) or limit-on-open (LOO) self #",
"a corporate # action on a security results in a stock split (forward",
"\"\" self.exempt_code = -1 # SMART routing only self.discretionary_amount = 0.0 self.etrade_only =",
"- delta, 'B' - beta, 'F' - FX, 'P' - pair self.hedge_param =",
"if self.scale_price_increment and self.scale_price_increment > 0.0: # pragma: no cover (I don't have",
"a security results in a stock split (forward or reverse), exchange for shares,",
"type: str self.maintenance_margin = None # type: str self.equity_with_loan = None # type:",
"at the end of the first quarter of 2012. If the last day",
"= 1 Unknown = 2 class AuctionStrategy(enum.Enum): Unset = 0 Match = 1",
"actual examples of these) message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease, self.pegged_change_amount, self.reference_change_amount, self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if self.conditions: #",
"Volatility orders (srv v26 and above) message.add(self.volatility) message.add(self.volatility_type) message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price) if self.delta_neutral_order_type: #",
"self.instrument.security_ids: security_id_type, security_id = next(iter(self.instrument.security_ids.items())) else: security_id_type = security_id = None message.add(security_id_type) message.add(security_id)",
"= 'PASSV REL' PeggedToMidpoint = 'PEG MID' PeggedToMarket = 'PEG MKT' PeggedToStock =",
"= \"\" # native cash quantity self.cash_quantity = 1.7976931348623157e+308 # type: float self.mifid2_decision_maker",
"cash quantity self.cash_quantity = 1.7976931348623157e+308 # type: float self.mifid2_decision_maker = \"\" self.mifid2_decision_algo =",
"False self.trail_stop_price = None # type: float self.trailing_percent = None # type: float",
"\"\" self.mifid2_decision_algo = \"\" self.mifid2_execution_trader = \"\" self.mifid2_execution_algo = \"\" updated = Event()",
"= 1.7976931348623157e+308 # type: float self.adjustable_trailing_unit = 0 self.conditions = [] # type:",
"= \"\" # \"\" (Default), \"IB\", \"Away\", \"PTA\" (PostTrade) # ALGO ORDERS ONLY",
"message.add(self.min_quantity) message.add(self.percent_offset) message.add(self.etrade_only) message.add(self.firm_quote_only) message.add(self.nbbo_price_cap) message.add(self.auction_strategy) # AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT message.add(self.starting_price) message.add(self.stock_ref_price) message.add(self.delta)",
"message.add(self.fa_group) message.add(self.fa_method) message.add(self.fa_percentage) message.add(self.fa_profile) message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT) # institutional short saleslot data (srv v18",
"= 'SELL' SShort = 'SSHORT' SLONG = 'SLONG' class VolatilityType(enum.Enum): Daily = 1",
"self.basis_points_type = None # type: int # EFP orders only # SCALE ORDERS",
"or Cancel. Any portion that is not filled as soon as it becomes",
"FX, 'P' - pair self.hedge_param = \"\" # 'beta=X' value for beta hedge,",
"# pragma: no cover (I don't have actual examples of these) message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval)",
"'VOL' class TimeInForce(str, enum.Enum): Day = \"DAY\" # Valid for the day only.",
"= \"\" self.delta_neutral_clearing_account = \"\" self.delta_neutral_clearing_intent = \"\" self.delta_neutral_open_close = \"\" self.delta_neutral_short_sale =",
"= 0 # main order fields self.action = None # type: Action self.total_quantity",
"= \"FOK\" DayTillCancel = \"DTC\" # Day until Canceled class Order(Serializable): def __init__(self,",
"\"GTD\" Opening = \"OPG\" # Use OPG to send a market-on-open (MOO) or",
"noqa from ib_async.protocol import ProtocolInterface, Serializable, ProtocolVersion, IncomingMessage, OutgoingMessage class OrderOrigin(enum.Enum): Customer =",
"until Date. It will remain working within the system and in the marketplace",
"canceled. GTC orders will be automatically be cancelled under the following conditions: If",
"= False self.min_quantity = None # type: int self.percent_offset = None # type:",
"OrderOrigin.Customer self.short_sale_slot = 0 self.designated_location = \"\" self.exempt_code = -1 # SMART routing",
"# type: str self.commission = None # type: float self.min_commission = None #",
"# type: typing.List[None] # not suppored yet self.conditions_cancel_order = False self.conditions_ignore_regular_trading_hours = False",
"AUCTION_TRANSPARENT message.add(self.starting_price) message.add(self.stock_ref_price) message.add(self.delta) # Volatility orders had specific watermark price attribs in",
"final trading day of that quarter. For example, if the last day #",
"\"PEG BENCH\": # pragma: no cover (I don't have actual examples of these)",
"self.display_size = 0 self.trigger_method = 0 self.outside_regular_trading_hours = False self.hidden = False self.good_after_time",
"to IB that remain in force for more than one day will not",
"'REL + LMT' RelativeMarketCombo = 'REL + MKT' Stop = \"STP\" StopLimit =",
"not be reduced for dividends. To allow # adjustment to your order price",
"MID' PeggedToMarket = 'PEG MKT' PeggedToStock = 'PEG STK' PeggedToBenchmark = 'PEG BENCH'",
"message.add(self.short_sale_slot) # # 0 for retail, 1 or 2 for institutions message.add(self.designated_location) #",
"don't have actual examples of these) message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset) message.add(self.scale_auto_reset) message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent)",
"2 class OrderType(str, enum.Enum): Undefined = \"\" BoxTop = 'BOX TOP' Limit =",
"\"\" self.clearing_account = \"\" # True beneficiary of the order self.clearing_intent = \"\"",
"0 self.perm_id = 0 # main order fields self.action = None # type:",
"calendar # quarter following the current quarter. # Orders submitted to IB that",
"the quarter is Sunday, the orders will be cancelled on the preceding Friday.",
"GTC orders. self.active_start_time = None # type: datetime.datetime self.active_stop_time = None # type:",
"ProtocolVersion, IncomingMessage, OutgoingMessage class OrderOrigin(enum.Enum): Customer = 0 Firm = 1 Unknown =",
"Instrument # Filled by status messages self.status = None # type: str self.filled",
"that are modified will be assigned a new “Auto Expire” date consistent with",
"TOP' Limit = 'LMT' LimitIfTouched = 'LIT' LimitOnClose = 'LOC' Market = 'MKT'",
"will remain working within the system and in the marketplace until it executes",
"self.parent_id = 0 self.block_order = False self.sweep_to_fill = False self.display_size = 0 self.trigger_method",
"self.reference_change_amount, self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if self.conditions: # pragma: no cover (Not implemented) raise UnsupportedFeature(\"Order",
"# type: int self.scale_random_percent = False self.scale_table = \"\" # # HEDGE ORDERS",
"OPG to send a market-on-open (MOO) or limit-on-open (LOO) self # If the",
"= None # type: int self.last_fill_price = None # type: float self.client_id =",
"1.7976931348623157e+308 # type: float self.mifid2_decision_maker = \"\" self.mifid2_decision_algo = \"\" self.mifid2_execution_trader = \"\"",
"execute as soon as it becomes available, the entire order is canceled. FillOrKill",
"= 0 self.delta_neutral_settling_firm = \"\" self.delta_neutral_clearing_account = \"\" self.delta_neutral_clearing_intent = \"\" self.delta_neutral_open_close =",
"self.delta_neutral_open_close = \"\" self.delta_neutral_short_sale = False self.delta_neutral_short_sale_slot = 0 self.delta_neutral_designated_location = \"\" self.continuous_update",
"of these) message.add(self.instrument.underlying_component) message.add(self.algo_strategy) if self.algo_strategy: # pragma: no cover (I don't have",
"ORDERS ONLY self.volatility = None # type: float self.volatility_type = None # type:",
"(I don't have actual examples of these) message.add(self.algo_parameters) message.add(self.algo_id) message.add(self.what_if) message.add(\"\".join(\"%s=%s;\" % (k,",
"type: datetime.datetime self.good_till_date = None # type: datetime.datetime self.rule80a = \"\" self.all_or_none =",
"# type: float self.remaining = None # type: float self.average_fill_price = None #",
"Opening = \"OPG\" # Use OPG to send a market-on-open (MOO) or limit-on-open",
"self.sweep_to_fill = False self.display_size = 0 self.trigger_method = 0 self.outside_regular_trading_hours = False self.hidden",
"type: float self.scale_price_adjust_interval = None # type: int self.scale_profit_offset = None # type:",
"automatically be cancelled under the following conditions: If a corporate # action on",
"= 0 self.block_order = False self.sweep_to_fill = False self.display_size = 0 self.trigger_method =",
"shares, or distribution of # shares. If you do not log into your",
"not suppored yet self.conditions_cancel_order = False self.conditions_ignore_regular_trading_hours = False self.ext_operator = \"\" self.soft_dollar_tier_name",
"\"\" self.transmit = True self.parent_id = 0 self.block_order = False self.sweep_to_fill = False",
"type: str self.market_cap_price = None # type: float self.order_ref = None # type:",
"message.add(self.scale_auto_reset) message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent) message.add(self.scale_table) message.add(self.active_start_time) message.add(self.active_stop_time) # HEDGE orders message.add(self.hedge_type) if self.hedge_type:",
"and VOL orders only self.stock_range_lower = None # type: float self.stock_range_upper = None",
"= \"\" self.order_miscellaneous_options = {} # type: typing.Dict[str, str] self.reference_contract_id = 0 self.pegged_change_amount",
"for pair hedge # Clearing info self.account = \"\" # IB account self.settling_firm",
"self.status = None # type: str self.filled = None # type: float self.remaining",
"- FX, 'P' - pair self.hedge_param = \"\" # 'beta=X' value for beta",
"= None # type: str self.warning_text = None # type: str self.order_id =",
"2 Transparent = 3 class Action(str, enum.Enum): Buy = 'BUY' Sell = 'SELL'",
"type: float self.scale_price_adjust_value = None # type: float self.scale_price_adjust_interval = None # type:",
"datetime.datetime self.active_stop_time = None # type: datetime.datetime self.oca_group = \"\" self.oca_type = 0",
"Filled by status messages self.status = None # type: str self.filled = None",
"# type: float self.order_ref = None # type: str self.combo_legs_description = None #",
"None # type: datetime.datetime self.good_till_date = None # type: datetime.datetime self.rule80a = \"\"",
"90 days. # At the end of the calendar quarter following the current",
"None # type: float self.stock_range_upper = None # type: float self.randomize_price = False",
"self.adjusted_stop_limit_price = 1.7976931348623157e+308 # type: float self.adjusted_trailing_amount = 1.7976931348623157e+308 # type: float self.adjustable_trailing_unit",
"message.add(self.scale_price_increment) if self.scale_price_increment and self.scale_price_increment > 0.0: # pragma: no cover (I don't",
"'SLONG' class VolatilityType(enum.Enum): Daily = 1 Annual = 2 class OrderType(str, enum.Enum): Undefined",
"consistent with the end of the calendar # quarter following the current quarter.",
"class OrderType(str, enum.Enum): Undefined = \"\" BoxTop = 'BOX TOP' Limit = 'LMT'",
"self.delta_neutral_clearing_account = \"\" self.delta_neutral_clearing_intent = \"\" self.delta_neutral_open_close = \"\" self.delta_neutral_short_sale = False self.delta_neutral_short_sale_slot",
"next(iter(self.instrument.security_ids.items())) else: security_id_type = security_id = None message.add(security_id_type) message.add(security_id) message.add(self.action) message.add(self.total_quantity) message.add(self.order_type) message.add(self.limit_price)",
"= None # type: float self.stock_range_upper = None # type: float self.randomize_price =",
"message.add(self.trailing_percent) # SCALE orders message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size) message.add(self.scale_price_increment) if self.scale_price_increment and self.scale_price_increment > 0.0:",
"# action on a security results in a stock split (forward or reverse),",
"self.last_fill_price = None # type: float self.client_id = None # type: int self.why_held",
"institutions message.add(self.designated_location) # # populate only when shortSaleSlot = 2. message.add(self.exempt_code) message.add(self.oca_type) message.add(self.rule80a)",
"Buy = 'BUY' Sell = 'SELL' SShort = 'SSHORT' SLONG = 'SLONG' class",
"only when shortSaleSlot = 2. message.add(self.exempt_code) message.add(self.oca_type) message.add(self.rule80a) message.add(self.settling_firm) message.add(self.all_or_none) message.add(self.min_quantity) message.add(self.percent_offset) message.add(self.etrade_only)",
"orders with no limit price, also specify zero. self.limit_price = None # type:",
"the # close of the market on the date specified GoodTillDate = \"GTD\"",
"OrderType(str, enum.Enum): Undefined = \"\" BoxTop = 'BOX TOP' Limit = 'LMT' LimitIfTouched",
"type: float self.order_ref = None # type: str self.combo_legs_description = None # type:",
"= None # type: float self.remaining = None # type: float self.average_fill_price =",
"specific watermark price attribs in server version 26 # double lower = (protocol_version",
"Daily = 1 Annual = 2 class OrderType(str, enum.Enum): Undefined = \"\" BoxTop",
"of the quarter is Sunday, the orders will be cancelled on the preceding",
"# The LIMIT price. Used for limit, stop-limit and relative orders. In all",
"IB that remain in force for more than one day will not be",
"self.exempt_code = -1 # SMART routing only self.discretionary_amount = 0.0 self.etrade_only = True",
"the system and in the marketplace until it executes or until the #",
"[] # type: typing.List[None] # not suppored yet self.conditions_cancel_order = False self.conditions_ignore_regular_trading_hours =",
"= None # type: float self.average_fill_price = None # type: float self.perm_id =",
"becomes available, the entire order is canceled. FillOrKill = \"FOK\" DayTillCancel = \"DTC\"",
"self.adjusted_order_type, self.trigger_price, self.limit_price_offset, self.adjusted_stop_price, self.adjusted_stop_limit_price, self.adjusted_trailing_amount, self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER)",
"0.0 self.reference_exchange_id = \"\" self.adjusted_order_type = OrderType.Undefined self.trigger_price = 1.7976931348623157e+308 # type: float",
"self.order_miscellaneous_options = {} # type: typing.Dict[str, str] self.reference_contract_id = 0 self.pegged_change_amount = 0.0",
"v) for (k, v) in self.order_miscellaneous_options.items())) message.add(self.solicited) message.add(self.randomize_size) message.add(self.randomize_price) if self.order_type == \"PEG",
"message.add(self.firm_quote_only) message.add(self.nbbo_price_cap) message.add(self.auction_strategy) # AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT message.add(self.starting_price) message.add(self.stock_ref_price) message.add(self.delta) # Volatility orders",
"None # type: int self.last_fill_price = None # type: float self.client_id = None",
"isVolOrder) ? DBL_MAX : selfstockRangeLower; # double upper = (protocol_version == 26 &&",
"Day = \"DAY\" # Valid for the day only. # Good until canceled.",
"# type: str self.maintenance_margin = None # type: str self.equity_with_loan = None #",
"= 'MKT PRT' PassiveRelative = 'PASSV REL' PeggedToMidpoint = 'PEG MID' PeggedToMarket =",
"until canceled. The order will continue to work within the system and in",
"'LOC' Market = 'MKT' MarketIfTouched = 'MIT' MarketOnClose = 'MOC' MarketToLimit = 'MTL'",
"if the last day # of the quarter is Sunday, the orders will",
"self.override_percentage_constraints = False self.trail_stop_price = None # type: float self.trailing_percent = None #",
"2 for institutions message.add(self.designated_location) # # populate only when shortSaleSlot = 2. message.add(self.exempt_code)",
"within the system and in the marketplace until it executes # or is",
"orders. self.active_start_time = None # type: datetime.datetime self.active_stop_time = None # type: datetime.datetime",
"= \"\" self.soft_dollar_tier_value = \"\" self.soft_dollar_tier_display_name = \"\" # native cash quantity self.cash_quantity",
"self.scale_auto_reset = False self.scale_init_position = None # type: int self.scale_init_fill_quantity = None #",
"\"STP PRT\" TrailingStop = \"TRAIL\" TrailingStopLimit = \"TRAIL LIMIT\" Volatility = 'VOL' class",
"int self.why_held = None # type: str self.market_cap_price = None # type: float",
"is canceled. FillOrKill = \"FOK\" DayTillCancel = \"DTC\" # Day until Canceled class",
"self.adjusted_trailing_amount = 1.7976931348623157e+308 # type: float self.adjustable_trailing_unit = 0 self.conditions = [] #",
"of the market on the date specified GoodTillDate = \"GTD\" Opening = \"OPG\"",
"self.algo_strategy: # pragma: no cover (I don't have actual examples of these) message.add(self.algo_parameters)",
"quarter. For example, if the last day # of the quarter is Sunday,",
"= \"\" self.delta_neutral_short_sale = False self.delta_neutral_short_sale_slot = 0 self.delta_neutral_designated_location = \"\" self.continuous_update =",
"= 0 self.delta_neutral_designated_location = \"\" self.continuous_update = False self.reference_price_type = None # type:",
"self.order_reference = \"\" self.transmit = True self.parent_id = 0 self.block_order = False self.sweep_to_fill",
"# type: float self.opt_out_smart_routing = False # BOX exchange orders only self.auction_strategy =",
"self.algo_id = \"\" # What-if self.what_if = False # Not Held self.not_held =",
"datetime.datetime self.good_till_date = None # type: datetime.datetime self.rule80a = \"\" self.all_or_none = False",
"SMART routing only self.discretionary_amount = 0.0 self.etrade_only = True self.firm_quote_only = True self.nbbo_price_cap",
"parent: ProtocolInterface) -> None: self._parent = parent self.instrument = None # type: Instrument",
"end of the calendar quarter following the current quarter. For example, an order",
"= None # type: float self.scale_price_adjust_interval = None # type: int self.scale_profit_offset =",
"# # 0 for retail, 1 or 2 for institutions message.add(self.designated_location) # #",
"self.conditions_cancel_order = False self.conditions_ignore_regular_trading_hours = False self.ext_operator = \"\" self.soft_dollar_tier_name = \"\" self.soft_dollar_tier_value",
"str] self.reference_contract_id = 0 self.pegged_change_amount = 0.0 self.is_pegged_change_amount_decrease = False self.reference_change_amount = 0.0",
"None # type: Action self.total_quantity = 0.0 self.order_type = OrderType.Market # The LIMIT",
"cover (I don't have actual examples of these) message.add(self.hedge_param) message.add(self.opt_out_smart_routing) message.add(self.clearing_account) message.add(self.clearing_intent) message.add(self.not_held)",
"message.add(self.scale_subs_level_size) message.add(self.scale_price_increment) if self.scale_price_increment and self.scale_price_increment > 0.0: # pragma: no cover (I",
"'SELL' SShort = 'SSHORT' SLONG = 'SLONG' class VolatilityType(enum.Enum): Daily = 1 Annual",
"BENCH' Relative = 'REL' RelativeLimitCombo = 'REL + LMT' RelativeMarketCombo = 'REL +",
"the entire order is canceled. FillOrKill = \"FOK\" DayTillCancel = \"DTC\" # Day",
"orders. In all other cases specify zero. For # relative orders with no",
"the marketplace until it executes # or is canceled. GTC orders will be",
"= 1.7976931348623157e+308 # type: float self.adjusted_stop_limit_price = 1.7976931348623157e+308 # type: float self.adjusted_trailing_amount =",
"None # type: float self.stock_ref_price = None # type: float self.delta = None",
"message.add(self.oca_type) message.add(self.rule80a) message.add(self.settling_firm) message.add(self.all_or_none) message.add(self.min_quantity) message.add(self.percent_offset) message.add(self.etrade_only) message.add(self.firm_quote_only) message.add(self.nbbo_price_cap) message.add(self.auction_strategy) # AUCTION_MATCH, AUCTION_IMPROVEMENT,",
"# noqa from ib_async.errors import UnsupportedFeature from ib_async.event import Event from ib_async import",
"message.add(self.hedge_param) message.add(self.opt_out_smart_routing) message.add(self.clearing_account) message.add(self.clearing_intent) message.add(self.not_held) message.add(bool(self.instrument.underlying_component)) if self.instrument.underlying_component: # pragma: no cover (I",
"day of that quarter. For example, if the last day # of the",
"= \"\" self.mifid2_execution_algo = \"\" updated = Event() # type: Event[None] on_execution =",
"self._parent = parent self.instrument = None # type: Instrument # Filled by status",
"# Orders that are modified will be assigned a new “Auto Expire” date",
"class Order(Serializable): def __init__(self, parent: ProtocolInterface) -> None: self._parent = parent self.instrument =",
"security results in a stock split (forward or reverse), exchange for shares, or",
"institutional short saleslot data (srv v18 and above) message.add(self.short_sale_slot) # # 0 for",
"or 2 for institutions message.add(self.designated_location) # # populate only when shortSaleSlot = 2.",
"when shortSaleSlot = 2. message.add(self.exempt_code) message.add(self.oca_type) message.add(self.rule80a) message.add(self.settling_firm) message.add(self.all_or_none) message.add(self.min_quantity) message.add(self.percent_offset) message.add(self.etrade_only) message.add(self.firm_quote_only)",
"\"\" self.delta_neutral_aux_price = None # type: float self.delta_neutral_contract_id = 0 self.delta_neutral_settling_firm = \"\"",
"0 self.block_order = False self.sweep_to_fill = False self.display_size = 0 self.trigger_method = 0",
"message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease, self.pegged_change_amount, self.reference_change_amount, self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if self.conditions: # pragma: no cover (Not",
"= TimeInForce.GoodTillCancel # for GTC orders. self.active_start_time = None # type: datetime.datetime self.active_stop_time",
"type: str self.combo_legs_description = None # type: str self.inital_margin = None # type:",
"= None # type: float # EFP orders only self.basis_points_type = None #",
"= 'MIT' MarketOnClose = 'MOC' MarketToLimit = 'MTL' MarketWithProtection = 'MKT PRT' PassiveRelative",
"type: str self.filled = None # type: float self.remaining = None # type:",
"= None # type: str self.filled = None # type: float self.remaining =",
"the date specified GoodTillDate = \"GTD\" Opening = \"OPG\" # Use OPG to",
"trading day of that quarter. For example, if the last day # of",
"False self.good_after_time = None # type: datetime.datetime self.good_till_date = None # type: datetime.datetime",
"# 0 for retail, 1 or 2 for institutions message.add(self.designated_location) # # populate",
"SShort = 'SSHORT' SLONG = 'SLONG' class VolatilityType(enum.Enum): Daily = 1 Annual =",
"ib_async import execution # noqa from ib_async.instrument import Instrument # noqa from ib_async.protocol",
"str self.market_cap_price = None # type: float self.order_ref = None # type: str",
"of these) message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset) message.add(self.scale_auto_reset) message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent) message.add(self.scale_table) message.add(self.active_start_time) message.add(self.active_stop_time) #",
"self.clearing_intent = \"\" # \"\" (Default), \"IB\", \"Away\", \"PTA\" (PostTrade) # ALGO ORDERS",
"= 'PEG BENCH' Relative = 'REL' RelativeLimitCombo = 'REL + LMT' RelativeMarketCombo =",
"message.add(self.fa_profile) message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT) # institutional short saleslot data (srv v18 and above) message.add(self.short_sale_slot)",
"float self.delta_neutral_contract_id = 0 self.delta_neutral_settling_firm = \"\" self.delta_neutral_clearing_account = \"\" self.delta_neutral_clearing_intent = \"\"",
"self.pegged_change_amount, self.reference_change_amount, self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if self.conditions: # pragma: no cover (Not implemented) raise",
"EFP orders only # SCALE ORDERS ONLY self.scale_init_level_size = None # type: int",
"Order(Serializable): def __init__(self, parent: ProtocolInterface) -> None: self._parent = parent self.instrument = None",
"'D' - delta, 'B' - beta, 'F' - FX, 'P' - pair self.hedge_param",
"quarter is Sunday, the orders will be cancelled on the preceding Friday. #",
"of the calendar quarter following the current quarter. For example, an order placed",
"ORDERS ONLY self.algo_strategy = \"\" self.algo_parameters = {} # type: typing.Dict[str, str] self.smart_combo_routing_params",
"# Day until Canceled class Order(Serializable): def __init__(self, parent: ProtocolInterface) -> None: self._parent",
"# type: Action self.total_quantity = 0.0 self.order_type = OrderType.Market # The LIMIT price.",
"# Good until Date. It will remain working within the system and in",
"self.client_id = None # type: int self.why_held = None # type: str self.market_cap_price",
"self.scale_price_increment = None # type: float self.scale_price_adjust_value = None # type: float self.scale_price_adjust_interval",
"# type: float self.adjusted_stop_price = 1.7976931348623157e+308 # type: float self.adjusted_stop_limit_price = 1.7976931348623157e+308 #",
"message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset) message.add(self.scale_auto_reset) message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent) message.add(self.scale_table) message.add(self.active_start_time) message.add(self.active_stop_time) # HEDGE orders message.add(self.hedge_type)",
"= \"STP\" StopLimit = \"STP LMT\" StopWithProtection = \"STP PRT\" TrailingStop = \"TRAIL\"",
"soon as it becomes available, the entire order is canceled. FillOrKill = \"FOK\"",
"self.delta_neutral_order_type: # pragma: no cover (I don't have actual examples of these) message.add(self.delta_neutral_contract_id)",
"1 Improvement = 2 Transparent = 3 class Action(str, enum.Enum): Buy = 'BUY'",
"(I don't have actual examples of these) message.add(self.hedge_param) message.add(self.opt_out_smart_routing) message.add(self.clearing_account) message.add(self.clearing_intent) message.add(self.not_held) message.add(bool(self.instrument.underlying_component))",
"occur at the close of the final trading day of that quarter. For",
"\"IB\", \"Away\", \"PTA\" (PostTrade) # ALGO ORDERS ONLY self.algo_strategy = \"\" self.algo_parameters =",
"'MKT' MarketIfTouched = 'MIT' MarketOnClose = 'MOC' MarketToLimit = 'MTL' MarketWithProtection = 'MKT",
"shortSaleSlot = 2. message.add(self.exempt_code) message.add(self.oca_type) message.add(self.rule80a) message.add(self.settling_firm) message.add(self.all_or_none) message.add(self.min_quantity) message.add(self.percent_offset) message.add(self.etrade_only) message.add(self.firm_quote_only) message.add(self.nbbo_price_cap)",
"pair hedge # Clearing info self.account = \"\" # IB account self.settling_firm =",
"# type: float self.scale_price_adjust_interval = None # type: int self.scale_profit_offset = None #",
"Clearing info self.account = \"\" # IB account self.settling_firm = \"\" self.clearing_account =",
"= security_id = None message.add(security_id_type) message.add(security_id) message.add(self.action) message.add(self.total_quantity) message.add(self.order_type) message.add(self.limit_price) message.add(self.aux_price) message.add(self.time_in_force) message.add(self.oca_group)",
"message.add(self.action) message.add(self.total_quantity) message.add(self.order_type) message.add(self.limit_price) message.add(self.aux_price) message.add(self.time_in_force) message.add(self.oca_group) message.add(self.account) message.add(self.open_close) message.add(self.origin) message.add(self.order_reference) message.add(self.transmit) message.add(self.parent_id)",
"self.open_close = \"O\" self.origin = OrderOrigin.Customer self.short_sale_slot = 0 self.designated_location = \"\" self.exempt_code",
"\"\" self.delta_neutral_short_sale = False self.delta_neutral_short_sale_slot = 0 self.delta_neutral_designated_location = \"\" self.continuous_update = False",
"orders message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size) message.add(self.scale_price_increment) if self.scale_price_increment and self.scale_price_increment > 0.0: # pragma: no",
"only self.fa_group = \"\" self.fa_profile = \"\" self.fa_method = \"\" self.fa_percentage = \"\"",
"= None # type: datetime.datetime self.rule80a = \"\" self.all_or_none = False self.min_quantity =",
"as soon as it becomes available, the entire order is canceled. FillOrKill =",
"= None # type: int # 1=Average, 2 = BidOrAsk # COMBO ORDERS",
"ImmediateOrCancel = \"IOC\" # Good until Date. It will remain working within the",
"and in the marketplace until it executes or until the # close of",
"= True self.parent_id = 0 self.block_order = False self.sweep_to_fill = False self.display_size =",
"None # type: int self.scale_init_fill_quantity = None # type: int self.scale_random_percent = False",
"(I don't have actual examples of these) message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset) message.add(self.scale_auto_reset) message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity)",
"message.add(self.account) message.add(self.open_close) message.add(self.origin) message.add(self.order_reference) message.add(self.transmit) message.add(self.parent_id) message.add(self.block_order) message.add(self.sweep_to_fill) message.add(self.display_size) message.add(self.trigger_method) message.add(self.outside_regular_trading_hours) message.add(self.hidden) assert",
"# noqa from ib_async.protocol import ProtocolInterface, Serializable, ProtocolVersion, IncomingMessage, OutgoingMessage class OrderOrigin(enum.Enum): Customer",
"if self.instrument.security_ids: security_id_type, security_id = next(iter(self.instrument.security_ids.items())) else: security_id_type = security_id = None message.add(security_id_type)",
"\"\" # True beneficiary of the order self.clearing_intent = \"\" # \"\" (Default),",
"beta hedge, 'ratio=Y' for pair hedge # Clearing info self.account = \"\" #",
"self.market_cap_price = None # type: float self.order_ref = None # type: str self.combo_legs_description",
"= False # BOX exchange orders only self.auction_strategy = AuctionStrategy.Unset self.starting_price = None",
"type: float self.trailing_percent = None # type: float # financial advisors only self.fa_group",
"self.origin = OrderOrigin.Customer self.short_sale_slot = 0 self.designated_location = \"\" self.exempt_code = -1 #",
"date specified GoodTillDate = \"GTD\" Opening = \"OPG\" # Use OPG to send",
"self.hedge_param = \"\" # 'beta=X' value for beta hedge, 'ratio=Y' for pair hedge",
"updated = Event() # type: Event[None] on_execution = Event() # type: Event[execution.Execution] def",
"adjustment to your order price on ex-dividend date, consider using a Good-Til-Date/Time (GTD)",
"== \"PEG BENCH\": # pragma: no cover (I don't have actual examples of",
"= \"STP LMT\" StopWithProtection = \"STP PRT\" TrailingStop = \"TRAIL\" TrailingStopLimit = \"TRAIL",
"ALGO ORDERS ONLY self.algo_strategy = \"\" self.algo_parameters = {} # type: typing.Dict[str, str]",
"other cases specify zero. For # relative orders with no limit price, also",
"(GTD) or # Good-after-Time/Date (GAT) order type, or a combination of the two.",
"MarketIfTouched = 'MIT' MarketOnClose = 'MOC' MarketToLimit = 'MTL' MarketWithProtection = 'MKT PRT'",
"message.add(self.fa_percentage) message.add(self.fa_profile) message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT) # institutional short saleslot data (srv v18 and above)",
"int self.parent_id = None # type: int self.last_fill_price = None # type: float",
"# \"\" (Default), \"IB\", \"Away\", \"PTA\" (PostTrade) # ALGO ORDERS ONLY self.algo_strategy =",
"None # type: str self.maintenance_margin = None # type: str self.equity_with_loan = None",
"= None # type: float self.max_commission = None # type: float self.commission_currency =",
"ORDERS self.hedge_type = \"\" # 'D' - delta, 'B' - beta, 'F' -",
"of these) message.add(self.algo_parameters) message.add(self.algo_id) message.add(self.what_if) message.add(\"\".join(\"%s=%s;\" % (k, v) for (k, v) in",
"into your IB account for 90 days. # At the end of the",
"# type: typing.Dict[str, str] self.smart_combo_routing_params = {} # type: typing.Dict[str, str] self.algo_id =",
"v26 and above) message.add(self.volatility) message.add(self.volatility_type) message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price) if self.delta_neutral_order_type: # pragma: no cover",
"message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location) message.add(self.continuous_update) message.add(self.reference_price_type) message.add(self.trail_stop_price) message.add(self.trailing_percent) #",
"self.settling_firm = \"\" self.clearing_account = \"\" # True beneficiary of the order self.clearing_intent",
"self.discretionary_amount = 0.0 self.etrade_only = True self.firm_quote_only = True self.nbbo_price_cap = None #",
"MarketWithProtection = 'MKT PRT' PassiveRelative = 'PASSV REL' PeggedToMidpoint = 'PEG MID' PeggedToMarket",
"float # financial advisors only self.fa_group = \"\" self.fa_profile = \"\" self.fa_method =",
"pragma: no cover (I don't have actual examples of these) message.add(self.instrument.underlying_component) message.add(self.algo_strategy) if",
"cover (I don't have actual examples of these) message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset) message.add(self.scale_auto_reset) message.add(self.scale_init_position)",
"= None # type: float self.randomize_price = False self.randomize_size = False # VOLATILITY",
"action on a security results in a stock split (forward or reverse), exchange",
"self.delta_neutral_clearing_intent = \"\" self.delta_neutral_open_close = \"\" self.delta_neutral_short_sale = False self.delta_neutral_short_sale_slot = 0 self.delta_neutral_designated_location",
"the current quarter. For example, an order placed during the third # quarter",
"the system and in the marketplace until it executes # or is canceled.",
"no conditions self.adjusted_order_type, self.trigger_price, self.limit_price_offset, self.adjusted_stop_price, self.adjusted_stop_limit_price, self.adjusted_trailing_amount, self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name,",
"message.add(security_id) message.add(self.action) message.add(self.total_quantity) message.add(self.order_type) message.add(self.limit_price) message.add(self.aux_price) message.add(self.time_in_force) message.add(self.oca_group) message.add(self.account) message.add(self.open_close) message.add(self.origin) message.add(self.order_reference) message.add(self.transmit)",
"min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker, self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader, self.mifid2_execution_algo, min_version=ProtocolVersion.MIFID_EXECUTION) def deserialize(self, message: IncomingMessage):",
"ProtocolInterface, Serializable, ProtocolVersion, IncomingMessage, OutgoingMessage class OrderOrigin(enum.Enum): Customer = 0 Firm = 1",
"= \"\" self.all_or_none = False self.min_quantity = None # type: int self.percent_offset =",
"or distribution of # shares. If you do not log into your IB",
"type: Event[None] on_execution = Event() # type: Event[execution.Execution] def serialize(self, message: OutgoingMessage): message.add(self.order_id)",
"these) message.add(self.hedge_param) message.add(self.opt_out_smart_routing) message.add(self.clearing_account) message.add(self.clearing_intent) message.add(self.not_held) message.add(bool(self.instrument.underlying_component)) if self.instrument.underlying_component: # pragma: no cover",
"day will not be reduced for dividends. To allow # adjustment to your",
"# # HEDGE ORDERS self.hedge_type = \"\" # 'D' - delta, 'B' -",
"self.reference_contract_id = 0 self.pegged_change_amount = 0.0 self.is_pegged_change_amount_decrease = False self.reference_change_amount = 0.0 self.reference_exchange_id",
"limit, stop-limit and relative orders. In all other cases specify zero. For #",
"None # type: float self.trailing_percent = None # type: float # financial advisors",
"as it becomes available, the entire order is canceled. FillOrKill = \"FOK\" DayTillCancel",
"= None # type: int self.scale_random_percent = False self.scale_table = \"\" # #",
"\"\" self.fa_method = \"\" self.fa_percentage = \"\" # institutional (ie non-cleared) only self.open_close",
"self.etrade_only = True self.firm_quote_only = True self.nbbo_price_cap = None # type: float self.opt_out_smart_routing",
"'PEG MKT' PeggedToStock = 'PEG STK' PeggedToBenchmark = 'PEG BENCH' Relative = 'REL'",
"'PEG BENCH' Relative = 'REL' RelativeLimitCombo = 'REL + LMT' RelativeMarketCombo = 'REL",
"# type: float self.limit_price_offset = 1.7976931348623157e+308 # type: float self.adjusted_stop_price = 1.7976931348623157e+308 #",
"had specific watermark price attribs in server version 26 # double lower =",
"= -1 # SMART routing only self.discretionary_amount = 0.0 self.etrade_only = True self.firm_quote_only",
"allow # adjustment to your order price on ex-dividend date, consider using a",
"type: float self.commission_currency = None # type: str self.warning_text = None # type:",
"The order will continue to work within the system and in the marketplace",
"type: float # extended order fields self.time_in_force = TimeInForce.GoodTillCancel # for GTC orders.",
"from ib_async.instrument import Instrument # noqa from ib_async.protocol import ProtocolInterface, Serializable, ProtocolVersion, IncomingMessage,",
"self.action = None # type: Action self.total_quantity = 0.0 self.order_type = OrderType.Market #",
"None # type: int self.scale_price_increment = None # type: float self.scale_price_adjust_value = None",
"self.fa_group = \"\" self.fa_profile = \"\" self.fa_method = \"\" self.fa_percentage = \"\" #",
"For # relative orders with no limit price, also specify zero. self.limit_price =",
"import typing # noqa from ib_async.errors import UnsupportedFeature from ib_async.event import Event from",
"parent self.instrument = None # type: Instrument # Filled by status messages self.status",
"message.add(self.trigger_method) message.add(self.outside_regular_trading_hours) message.add(self.hidden) assert self.instrument.security_type != 'BAG' # not supported message.add(\"\") # deprecated",
"None # type: float self.order_ref = None # type: str self.combo_legs_description = None",
"True self.parent_id = 0 self.block_order = False self.sweep_to_fill = False self.display_size = 0",
"self.delta_neutral_order_type = \"\" self.delta_neutral_aux_price = None # type: float self.delta_neutral_contract_id = 0 self.delta_neutral_settling_firm",
"'LMT' LimitIfTouched = 'LIT' LimitOnClose = 'LOC' Market = 'MKT' MarketIfTouched = 'MIT'",
"watermark price attribs in server version 26 # double lower = (protocol_version ==",
"type: datetime.datetime self.rule80a = \"\" self.all_or_none = False self.min_quantity = None # type:",
"only self.basis_points_type = None # type: int # EFP orders only # SCALE",
"message.add(self.good_after_time) message.add(self.good_till_date) message.add(self.fa_group) message.add(self.fa_method) message.add(self.fa_percentage) message.add(self.fa_profile) message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT) # institutional short saleslot data",
"as it becomes available in the market is canceled. ImmediateOrCancel = \"IOC\" #",
"your IB account for 90 days. # At the end of the calendar",
"# double upper = (protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeUpper;",
"work within the system and in the marketplace until it executes # or",
"message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset) message.add(self.scale_auto_reset) message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent) message.add(self.scale_table) message.add(self.active_start_time) message.add(self.active_stop_time) # HEDGE orders",
"message.add(self.clearing_intent) message.add(self.not_held) message.add(bool(self.instrument.underlying_component)) if self.instrument.underlying_component: # pragma: no cover (I don't have actual",
"v) in self.order_miscellaneous_options.items())) message.add(self.solicited) message.add(self.randomize_size) message.add(self.randomize_price) if self.order_type == \"PEG BENCH\": # pragma:",
"MKT' Stop = \"STP\" StopLimit = \"STP LMT\" StopWithProtection = \"STP PRT\" TrailingStop",
"self.percent_offset = None # type: float self.override_percentage_constraints = False self.trail_stop_price = None #",
"a combination of the two. GoodTillCancel = \"GTC\" # Immediate or Cancel. Any",
"message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker, self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader, self.mifid2_execution_algo, min_version=ProtocolVersion.MIFID_EXECUTION)",
"# type: datetime.datetime self.good_till_date = None # type: datetime.datetime self.rule80a = \"\" self.all_or_none",
"'MTL' MarketWithProtection = 'MKT PRT' PassiveRelative = 'PASSV REL' PeggedToMidpoint = 'PEG MID'",
"None # type: str self.market_cap_price = None # type: float self.order_ref = None",
"STP LMT orders, trailing amount, etc. self.aux_price = None # type: float #",
"# shares. If you do not log into your IB account for 90",
"- pair self.hedge_param = \"\" # 'beta=X' value for beta hedge, 'ratio=Y' for",
"import Instrument # noqa from ib_async.protocol import ProtocolInterface, Serializable, ProtocolVersion, IncomingMessage, OutgoingMessage class",
"Unset = 0 Match = 1 Improvement = 2 Transparent = 3 class",
"self.active_start_time = None # type: datetime.datetime self.active_stop_time = None # type: datetime.datetime self.oca_group",
"actual examples of these) message.add(self.algo_parameters) message.add(self.algo_id) message.add(self.what_if) message.add(\"\".join(\"%s=%s;\" % (k, v) for (k,",
"# Generic field to contain the stop price for STP LMT orders, trailing",
"= 2 class AuctionStrategy(enum.Enum): Unset = 0 Match = 1 Improvement = 2",
"upper = (protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeUpper; message.add(self.stock_range_lower) message.add(self.stock_range_upper)",
"data (srv v18 and above) message.add(self.short_sale_slot) # # 0 for retail, 1 or",
"does not execute as soon as it becomes available, the entire order is",
"self.limit_price_offset, self.adjusted_stop_price, self.adjusted_stop_limit_price, self.adjusted_trailing_amount, self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY)",
"{} # type: typing.Dict[str, str] self.smart_combo_routing_params = {} # type: typing.Dict[str, str] self.algo_id",
"Event from ib_async import execution # noqa from ib_async.instrument import Instrument # noqa",
"= 0.0 self.order_type = OrderType.Market # The LIMIT price. Used for limit, stop-limit",
"self.delta = None # type: float # pegged to stock and VOL orders",
"no cover (I don't have actual examples of these) message.add(self.algo_parameters) message.add(self.algo_id) message.add(self.what_if) message.add(\"\".join(\"%s=%s;\"",
"int self.scale_price_increment = None # type: float self.scale_price_adjust_value = None # type: float",
"= None # type: str self.maintenance_margin = None # type: str self.equity_with_loan =",
"Sell = 'SELL' SShort = 'SSHORT' SLONG = 'SLONG' class VolatilityType(enum.Enum): Daily =",
"float self.opt_out_smart_routing = False # BOX exchange orders only self.auction_strategy = AuctionStrategy.Unset self.starting_price",
"cover (Not implemented) raise UnsupportedFeature(\"Order conditions\") message.add(0, # no conditions self.adjusted_order_type, self.trigger_price, self.limit_price_offset,",
"if self.algo_strategy: # pragma: no cover (I don't have actual examples of these)",
"self.randomize_size = False # VOLATILITY ORDERS ONLY self.volatility = None # type: float",
"int self.last_fill_price = None # type: float self.client_id = None # type: int",
"# pragma: no cover (I don't have actual examples of these) message.add(self.hedge_param) message.add(self.opt_out_smart_routing)",
"= \"O\" self.origin = OrderOrigin.Customer self.short_sale_slot = 0 self.designated_location = \"\" self.exempt_code =",
"pragma: no cover (I don't have actual examples of these) message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset)",
"= \"\" updated = Event() # type: Event[None] on_execution = Event() # type:",
"None # type: float self.commission_currency = None # type: str self.warning_text = None",
"= None # type: float self.min_commission = None # type: float self.max_commission =",
"self.hedge_type = \"\" # 'D' - delta, 'B' - beta, 'F' - FX,",
"LMT' RelativeMarketCombo = 'REL + MKT' Stop = \"STP\" StopLimit = \"STP LMT\"",
"0 self.outside_regular_trading_hours = False self.hidden = False self.good_after_time = None # type: datetime.datetime",
"of these) message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location) message.add(self.continuous_update) message.add(self.reference_price_type) message.add(self.trail_stop_price)",
"Firm = 1 Unknown = 2 class AuctionStrategy(enum.Enum): Unset = 0 Match =",
"serialize(self, message: OutgoingMessage): message.add(self.order_id) message.add(self.instrument) if self.instrument.security_ids: security_id_type, security_id = next(iter(self.instrument.security_ids.items())) else: security_id_type",
"if self.order_type == \"PEG BENCH\": # pragma: no cover (I don't have actual",
"StopLimit = \"STP LMT\" StopWithProtection = \"STP PRT\" TrailingStop = \"TRAIL\" TrailingStopLimit =",
"0.0 self.etrade_only = True self.firm_quote_only = True self.nbbo_price_cap = None # type: float",
"float self.trailing_percent = None # type: float # financial advisors only self.fa_group =",
"= \"OPG\" # Use OPG to send a market-on-open (MOO) or limit-on-open (LOO)",
"TrailingStop = \"TRAIL\" TrailingStopLimit = \"TRAIL LIMIT\" Volatility = 'VOL' class TimeInForce(str, enum.Enum):",
"(MOO) or limit-on-open (LOO) self # If the entire Fill-or-Kill order does not",
"&& isVolOrder) ? DBL_MAX : selfstockRangeUpper; message.add(self.stock_range_lower) message.add(self.stock_range_upper) message.add(self.override_percentage_constraints) # Volatility orders (srv",
"(I don't have actual examples of these) message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease, self.pegged_change_amount, self.reference_change_amount, self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK)",
"calendar quarter following the current quarter. For example, an order placed during the",
"from ib_async.event import Event from ib_async import execution # noqa from ib_async.instrument import",
"following conditions: If a corporate # action on a security results in a",
"= None # type: str self.equity_with_loan = None # type: str self.commission =",
"\"TRAIL LIMIT\" Volatility = 'VOL' class TimeInForce(str, enum.Enum): Day = \"DAY\" # Valid",
"= None # type: float self.delta = None # type: float # pegged",
"0 for retail, 1 or 2 for institutions message.add(self.designated_location) # # populate only",
"0 self.designated_location = \"\" self.exempt_code = -1 # SMART routing only self.discretionary_amount =",
"message.add(self.etrade_only) message.add(self.firm_quote_only) message.add(self.nbbo_price_cap) message.add(self.auction_strategy) # AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT message.add(self.starting_price) message.add(self.stock_ref_price) message.add(self.delta) # Volatility",
"'PASSV REL' PeggedToMidpoint = 'PEG MID' PeggedToMarket = 'PEG MKT' PeggedToStock = 'PEG",
"Serializable, ProtocolVersion, IncomingMessage, OutgoingMessage class OrderOrigin(enum.Enum): Customer = 0 Firm = 1 Unknown",
"min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker, self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader, self.mifid2_execution_algo, min_version=ProtocolVersion.MIFID_EXECUTION) def",
"distribution of # shares. If you do not log into your IB account",
"are modified will be assigned a new “Auto Expire” date consistent with the",
"% (k, v) for (k, v) in self.order_miscellaneous_options.items())) message.add(self.solicited) message.add(self.randomize_size) message.add(self.randomize_price) if self.order_type",
"zero. self.limit_price = None # type: float # Generic field to contain the",
"it executes or until the # close of the market on the date",
"self.perm_id = None # type: int self.parent_id = None # type: int self.last_fill_price",
"# IB account self.settling_firm = \"\" self.clearing_account = \"\" # True beneficiary of",
"third # quarter of 2011 will be canceled at the end of the",
"message.add(self.sweep_to_fill) message.add(self.display_size) message.add(self.trigger_method) message.add(self.outside_regular_trading_hours) message.add(self.hidden) assert self.instrument.security_type != 'BAG' # not supported message.add(\"\")",
"the end of the calendar # quarter following the current quarter. # Orders",
"# quarter of 2011 will be canceled at the end of the first",
"min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker, self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader, self.mifid2_execution_algo, min_version=ProtocolVersion.MIFID_EXECUTION) def deserialize(self, message: IncomingMessage): assert False,",
"2011 will be canceled at the end of the first quarter of 2012.",
"False self.display_size = 0 self.trigger_method = 0 self.outside_regular_trading_hours = False self.hidden = False",
"type: Action self.total_quantity = 0.0 self.order_type = OrderType.Market # The LIMIT price. Used",
"or is canceled. GTC orders will be automatically be cancelled under the following",
"the end of the first quarter of 2012. If the last day is",
"more than one day will not be reduced for dividends. To allow #",
"# type: int self.scale_subs_level_size = None # type: int self.scale_price_increment = None #",
"None # type: float self.average_fill_price = None # type: float self.perm_id = None",
"quarter of 2012. If the last day is a non-trading day, # the",
"end of the calendar # quarter following the current quarter. # Orders submitted",
"no limit price, also specify zero. self.limit_price = None # type: float #",
"OutgoingMessage): message.add(self.order_id) message.add(self.instrument) if self.instrument.security_ids: security_id_type, security_id = next(iter(self.instrument.security_ids.items())) else: security_id_type = security_id",
"self.short_sale_slot = 0 self.designated_location = \"\" self.exempt_code = -1 # SMART routing only",
"= None # type: float # pegged to stock and VOL orders only",
"the order self.clearing_intent = \"\" # \"\" (Default), \"IB\", \"Away\", \"PTA\" (PostTrade) #",
"examples of these) message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset) message.add(self.scale_auto_reset) message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent) message.add(self.scale_table) message.add(self.active_start_time) message.add(self.active_stop_time)",
"False self.randomize_size = False # VOLATILITY ORDERS ONLY self.volatility = None # type:",
"= None # type: float self.perm_id = None # type: int self.parent_id =",
"message.add(bool(self.instrument.underlying_component)) if self.instrument.underlying_component: # pragma: no cover (I don't have actual examples of",
"Any portion that is not filled as soon as it becomes available in",
"double upper = (protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeUpper; message.add(self.stock_range_lower)",
"PeggedToBenchmark = 'PEG BENCH' Relative = 'REL' RelativeLimitCombo = 'REL + LMT' RelativeMarketCombo",
"# type: int self.last_fill_price = None # type: float self.client_id = None #",
"type: float self.volatility_type = None # type: VolatilityType self.delta_neutral_order_type = \"\" self.delta_neutral_aux_price =",
"examples of these) message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease, self.pegged_change_amount, self.reference_change_amount, self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if self.conditions: # pragma:",
"on ex-dividend date, consider using a Good-Til-Date/Time (GTD) or # Good-after-Time/Date (GAT) order",
"\"\" self.continuous_update = False self.reference_price_type = None # type: int # 1=Average, 2",
"financial advisors only self.fa_group = \"\" self.fa_profile = \"\" self.fa_method = \"\" self.fa_percentage",
"= None # type: int self.scale_price_increment = None # type: float self.scale_price_adjust_value =",
"None # type: float self.remaining = None # type: float self.average_fill_price = None",
"# type: datetime.datetime self.oca_group = \"\" self.oca_type = 0 self.order_reference = \"\" self.transmit",
"during the third # quarter of 2011 will be canceled at the end",
"What-if self.what_if = False # Not Held self.not_held = False self.solicited = False",
"'P' - pair self.hedge_param = \"\" # 'beta=X' value for beta hedge, 'ratio=Y'",
"type: float self.average_fill_price = None # type: float self.perm_id = None # type:",
"type: datetime.datetime self.active_stop_time = None # type: datetime.datetime self.oca_group = \"\" self.oca_type =",
"None # type: float self.randomize_price = False self.randomize_size = False # VOLATILITY ORDERS",
"message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location) message.add(self.continuous_update) message.add(self.reference_price_type) message.add(self.trail_stop_price) message.add(self.trailing_percent) # SCALE orders message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size) message.add(self.scale_price_increment)",
"float self.client_id = None # type: int self.why_held = None # type: str",
"“Auto Expire” date consistent with the end of the calendar # quarter following",
"self.volatility_type = None # type: VolatilityType self.delta_neutral_order_type = \"\" self.delta_neutral_aux_price = None #",
"UnsupportedFeature(\"Order conditions\") message.add(0, # no conditions self.adjusted_order_type, self.trigger_price, self.limit_price_offset, self.adjusted_stop_price, self.adjusted_stop_limit_price, self.adjusted_trailing_amount, self.adjustable_trailing_unit,",
"order fields self.action = None # type: Action self.total_quantity = 0.0 self.order_type =",
"reverse), exchange for shares, or distribution of # shares. If you do not",
"message.add(self.aux_price) message.add(self.time_in_force) message.add(self.oca_group) message.add(self.account) message.add(self.open_close) message.add(self.origin) message.add(self.order_reference) message.add(self.transmit) message.add(self.parent_id) message.add(self.block_order) message.add(self.sweep_to_fill) message.add(self.display_size) message.add(self.trigger_method)",
"security_id = next(iter(self.instrument.security_ids.items())) else: security_id_type = security_id = None message.add(security_id_type) message.add(security_id) message.add(self.action) message.add(self.total_quantity)",
"str self.inital_margin = None # type: str self.maintenance_margin = None # type: str",
"# type: Instrument # Filled by status messages self.status = None # type:",
"\"\" self.mifid2_execution_algo = \"\" updated = Event() # type: Event[None] on_execution = Event()",
"= 'BUY' Sell = 'SELL' SShort = 'SSHORT' SLONG = 'SLONG' class VolatilityType(enum.Enum):",
"to work within the system and in the marketplace until it executes #",
"None # type: float self.client_id = None # type: int self.why_held = None",
"self.scale_subs_level_size = None # type: int self.scale_price_increment = None # type: float self.scale_price_adjust_value",
"in the market is canceled. ImmediateOrCancel = \"IOC\" # Good until Date. It",
"a stock split (forward or reverse), exchange for shares, or distribution of #",
"None # type: float self.min_commission = None # type: float self.max_commission = None",
"these) message.add(self.instrument.underlying_component) message.add(self.algo_strategy) if self.algo_strategy: # pragma: no cover (I don't have actual",
"= None # type: float self.volatility_type = None # type: VolatilityType self.delta_neutral_order_type =",
"+ LMT' RelativeMarketCombo = 'REL + MKT' Stop = \"STP\" StopLimit = \"STP",
"\"\" self.soft_dollar_tier_value = \"\" self.soft_dollar_tier_display_name = \"\" # native cash quantity self.cash_quantity =",
"# type: float self.client_id = None # type: int self.why_held = None #",
"# Good-after-Time/Date (GAT) order type, or a combination of the two. GoodTillCancel =",
"None # type: int self.scale_subs_level_size = None # type: int self.scale_price_increment = None",
"\"\" # 'beta=X' value for beta hedge, 'ratio=Y' for pair hedge # Clearing",
"'LIT' LimitOnClose = 'LOC' Market = 'MKT' MarketIfTouched = 'MIT' MarketOnClose = 'MOC'",
"message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location) message.add(self.continuous_update) message.add(self.reference_price_type) message.add(self.trail_stop_price) message.add(self.trailing_percent) # SCALE orders message.add(self.scale_init_level_size)",
"message.add(self.clearing_account) message.add(self.clearing_intent) message.add(self.not_held) message.add(bool(self.instrument.underlying_component)) if self.instrument.underlying_component: # pragma: no cover (I don't have",
"conditions\") message.add(0, # no conditions self.adjusted_order_type, self.trigger_price, self.limit_price_offset, self.adjusted_stop_price, self.adjusted_stop_limit_price, self.adjusted_trailing_amount, self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK)",
"1 or 2 for institutions message.add(self.designated_location) # # populate only when shortSaleSlot =",
"self.block_order = False self.sweep_to_fill = False self.display_size = 0 self.trigger_method = 0 self.outside_regular_trading_hours",
"message.add(self.outside_regular_trading_hours) message.add(self.hidden) assert self.instrument.security_type != 'BAG' # not supported message.add(\"\") # deprecated sharesAllocation",
"26 # double lower = (protocol_version == 26 && isVolOrder) ? DBL_MAX :",
"self.limit_price = None # type: float # Generic field to contain the stop",
"# deprecated sharesAllocation field message.add(self.discretionary_amount) message.add(self.good_after_time) message.add(self.good_till_date) message.add(self.fa_group) message.add(self.fa_method) message.add(self.fa_percentage) message.add(self.fa_profile) message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT)",
"= \"\" self.oca_type = 0 self.order_reference = \"\" self.transmit = True self.parent_id =",
"message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location) message.add(self.continuous_update) message.add(self.reference_price_type) message.add(self.trail_stop_price) message.add(self.trailing_percent) # SCALE orders",
"TrailingStopLimit = \"TRAIL LIMIT\" Volatility = 'VOL' class TimeInForce(str, enum.Enum): Day = \"DAY\"",
"self.total_quantity = 0.0 self.order_type = OrderType.Market # The LIMIT price. Used for limit,",
"self.soft_dollar_tier_name = \"\" self.soft_dollar_tier_value = \"\" self.soft_dollar_tier_display_name = \"\" # native cash quantity",
"advisors only self.fa_group = \"\" self.fa_profile = \"\" self.fa_method = \"\" self.fa_percentage =",
"= 'LOC' Market = 'MKT' MarketIfTouched = 'MIT' MarketOnClose = 'MOC' MarketToLimit =",
"26 && isVolOrder) ? DBL_MAX : selfstockRangeLower; # double upper = (protocol_version ==",
"self.hedge_type: # pragma: no cover (I don't have actual examples of these) message.add(self.hedge_param)",
"None # type: float self.delta_neutral_contract_id = 0 self.delta_neutral_settling_firm = \"\" self.delta_neutral_clearing_account = \"\"",
"0 self.delta_neutral_designated_location = \"\" self.continuous_update = False self.reference_price_type = None # type: int",
"do not log into your IB account for 90 days. # At the",
"self.model_code = \"\" self.order_miscellaneous_options = {} # type: typing.Dict[str, str] self.reference_contract_id = 0",
"= 'MKT' MarketIfTouched = 'MIT' MarketOnClose = 'MOC' MarketToLimit = 'MTL' MarketWithProtection =",
"orders will be automatically be cancelled under the following conditions: If a corporate",
"None # type: int # EFP orders only # SCALE ORDERS ONLY self.scale_init_level_size",
"REL' PeggedToMidpoint = 'PEG MID' PeggedToMarket = 'PEG MKT' PeggedToStock = 'PEG STK'",
"self.commission = None # type: float self.min_commission = None # type: float self.max_commission",
"self.delta_neutral_aux_price = None # type: float self.delta_neutral_contract_id = 0 self.delta_neutral_settling_firm = \"\" self.delta_neutral_clearing_account",
"False # VOLATILITY ORDERS ONLY self.volatility = None # type: float self.volatility_type =",
"account for 90 days. # At the end of the calendar quarter following",
"# Immediate or Cancel. Any portion that is not filled as soon as",
"type: float self.stock_range_upper = None # type: float self.randomize_price = False self.randomize_size =",
"'REL + MKT' Stop = \"STP\" StopLimit = \"STP LMT\" StopWithProtection = \"STP",
"0 self.pegged_change_amount = 0.0 self.is_pegged_change_amount_decrease = False self.reference_change_amount = 0.0 self.reference_exchange_id = \"\"",
"type: Event[execution.Execution] def serialize(self, message: OutgoingMessage): message.add(self.order_id) message.add(self.instrument) if self.instrument.security_ids: security_id_type, security_id =",
"(Not implemented) raise UnsupportedFeature(\"Order conditions\") message.add(0, # no conditions self.adjusted_order_type, self.trigger_price, self.limit_price_offset, self.adjusted_stop_price,",
"self.transmit = True self.parent_id = 0 self.block_order = False self.sweep_to_fill = False self.display_size",
"2012. If the last day is a non-trading day, # the cancellation will",
"type: float # pegged to stock and VOL orders only self.stock_range_lower = None",
"type: int # EFP orders only # SCALE ORDERS ONLY self.scale_init_level_size = None",
"= False self.ext_operator = \"\" self.soft_dollar_tier_name = \"\" self.soft_dollar_tier_value = \"\" self.soft_dollar_tier_display_name =",
"= \"\" self.fa_percentage = \"\" # institutional (ie non-cleared) only self.open_close = \"O\"",
"message.add(self.order_id) message.add(self.instrument) if self.instrument.security_ids: security_id_type, security_id = next(iter(self.instrument.security_ids.items())) else: security_id_type = security_id =",
"self.continuous_update = False self.reference_price_type = None # type: int # 1=Average, 2 =",
"str] self.smart_combo_routing_params = {} # type: typing.Dict[str, str] self.algo_id = \"\" # What-if",
"until it executes # or is canceled. GTC orders will be automatically be",
"For example, if the last day # of the quarter is Sunday, the",
"# type: float self.perm_id = None # type: int self.parent_id = None #",
"# or is canceled. GTC orders will be automatically be cancelled under the",
"message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker, self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader, self.mifid2_execution_algo, min_version=ProtocolVersion.MIFID_EXECUTION) def deserialize(self,",
"BOX exchange orders only self.auction_strategy = AuctionStrategy.Unset self.starting_price = None # type: float",
"self.adjusted_stop_price, self.adjusted_stop_limit_price, self.adjusted_trailing_amount, self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker,",
"= None # type: datetime.datetime self.good_till_date = None # type: datetime.datetime self.rule80a =",
"day # of the quarter is Sunday, the orders will be cancelled on",
"Held self.not_held = False self.solicited = False self.model_code = \"\" self.order_miscellaneous_options = {}",
"type: float self.delta = None # type: float # pegged to stock and",
"and above) message.add(self.short_sale_slot) # # 0 for retail, 1 or 2 for institutions",
"float self.adjustable_trailing_unit = 0 self.conditions = [] # type: typing.List[None] # not suppored",
"= False self.scale_table = \"\" # # HEDGE ORDERS self.hedge_type = \"\" #",
"= None # type: float self.commission_currency = None # type: str self.warning_text =",
"Unknown = 2 class AuctionStrategy(enum.Enum): Unset = 0 Match = 1 Improvement =",
"price, also specify zero. self.limit_price = None # type: float # Generic field",
"= True self.firm_quote_only = True self.nbbo_price_cap = None # type: float self.opt_out_smart_routing =",
"self.soft_dollar_tier_display_name = \"\" # native cash quantity self.cash_quantity = 1.7976931348623157e+308 # type: float",
"1.7976931348623157e+308 # type: float self.adjusted_stop_price = 1.7976931348623157e+308 # type: float self.adjusted_stop_limit_price = 1.7976931348623157e+308",
"float self.delta = None # type: float # pegged to stock and VOL",
"# Volatility orders had specific watermark price attribs in server version 26 #",
"for (k, v) in self.order_miscellaneous_options.items())) message.add(self.solicited) message.add(self.randomize_size) message.add(self.randomize_price) if self.order_type == \"PEG BENCH\":",
"entire order is canceled. FillOrKill = \"FOK\" DayTillCancel = \"DTC\" # Day until",
"all other cases specify zero. For # relative orders with no limit price,",
"institutional (ie non-cleared) only self.open_close = \"O\" self.origin = OrderOrigin.Customer self.short_sale_slot = 0",
"self.reference_price_type = None # type: int # 1=Average, 2 = BidOrAsk # COMBO",
"self.warning_text = None # type: str self.order_id = 0 self.client_id = 0 self.perm_id",
"only self.auction_strategy = AuctionStrategy.Unset self.starting_price = None # type: float self.stock_ref_price = None",
"# Good until canceled. The order will continue to work within the system",
"of the final trading day of that quarter. For example, if the last",
"message.add(self.total_quantity) message.add(self.order_type) message.add(self.limit_price) message.add(self.aux_price) message.add(self.time_in_force) message.add(self.oca_group) message.add(self.account) message.add(self.open_close) message.add(self.origin) message.add(self.order_reference) message.add(self.transmit) message.add(self.parent_id) message.add(self.block_order)",
"enum.Enum): Buy = 'BUY' Sell = 'SELL' SShort = 'SSHORT' SLONG = 'SLONG'",
"= \"STP PRT\" TrailingStop = \"TRAIL\" TrailingStopLimit = \"TRAIL LIMIT\" Volatility = 'VOL'",
"self.instrument = None # type: Instrument # Filled by status messages self.status =",
"of # shares. If you do not log into your IB account for",
"account self.settling_firm = \"\" self.clearing_account = \"\" # True beneficiary of the order",
"exchange for shares, or distribution of # shares. If you do not log",
"pragma: no cover (Not implemented) raise UnsupportedFeature(\"Order conditions\") message.add(0, # no conditions self.adjusted_order_type,",
"def __init__(self, parent: ProtocolInterface) -> None: self._parent = parent self.instrument = None #",
"# type: int self.parent_id = None # type: int self.last_fill_price = None #",
"self.adjusted_stop_price = 1.7976931348623157e+308 # type: float self.adjusted_stop_limit_price = 1.7976931348623157e+308 # type: float self.adjusted_trailing_amount",
"ib_async.protocol import ProtocolInterface, Serializable, ProtocolVersion, IncomingMessage, OutgoingMessage class OrderOrigin(enum.Enum): Customer = 0 Firm",
"IncomingMessage, OutgoingMessage class OrderOrigin(enum.Enum): Customer = 0 Firm = 1 Unknown = 2",
"the last day is a non-trading day, # the cancellation will occur at",
"or a combination of the two. GoodTillCancel = \"GTC\" # Immediate or Cancel.",
"# SCALE ORDERS ONLY self.scale_init_level_size = None # type: int self.scale_subs_level_size = None",
"modified will be assigned a new “Auto Expire” date consistent with the end",
"self.scale_profit_offset = None # type: float self.scale_auto_reset = False self.scale_init_position = None #",
"\"Away\", \"PTA\" (PostTrade) # ALGO ORDERS ONLY self.algo_strategy = \"\" self.algo_parameters = {}",
"stop-limit and relative orders. In all other cases specify zero. For # relative",
"of the two. GoodTillCancel = \"GTC\" # Immediate or Cancel. Any portion that",
"self # If the entire Fill-or-Kill order does not execute as soon as",
"relative orders. In all other cases specify zero. For # relative orders with",
"False self.reference_change_amount = 0.0 self.reference_exchange_id = \"\" self.adjusted_order_type = OrderType.Undefined self.trigger_price = 1.7976931348623157e+308",
"= \"\" self.soft_dollar_tier_display_name = \"\" # native cash quantity self.cash_quantity = 1.7976931348623157e+308 #",
"working within the system and in the marketplace until it executes or until",
"and relative orders. In all other cases specify zero. For # relative orders",
"end of the first quarter of 2012. If the last day is a",
"float self.adjusted_stop_limit_price = 1.7976931348623157e+308 # type: float self.adjusted_trailing_amount = 1.7976931348623157e+308 # type: float",
"= 3 class Action(str, enum.Enum): Buy = 'BUY' Sell = 'SELL' SShort =",
"assigned a new “Auto Expire” date consistent with the end of the calendar",
"= 2. message.add(self.exempt_code) message.add(self.oca_type) message.add(self.rule80a) message.add(self.settling_firm) message.add(self.all_or_none) message.add(self.min_quantity) message.add(self.percent_offset) message.add(self.etrade_only) message.add(self.firm_quote_only) message.add(self.nbbo_price_cap) message.add(self.auction_strategy)",
"# If the entire Fill-or-Kill order does not execute as soon as it",
"= [] # type: typing.List[None] # not suppored yet self.conditions_cancel_order = False self.conditions_ignore_regular_trading_hours",
"'BUY' Sell = 'SELL' SShort = 'SSHORT' SLONG = 'SLONG' class VolatilityType(enum.Enum): Daily",
"self.fa_profile = \"\" self.fa_method = \"\" self.fa_percentage = \"\" # institutional (ie non-cleared)",
"0 self.trigger_method = 0 self.outside_regular_trading_hours = False self.hidden = False self.good_after_time = None",
"be assigned a new “Auto Expire” date consistent with the end of the",
"message.add(self.randomize_size) message.add(self.randomize_price) if self.order_type == \"PEG BENCH\": # pragma: no cover (I don't",
"available in the market is canceled. ImmediateOrCancel = \"IOC\" # Good until Date.",
"AuctionStrategy(enum.Enum): Unset = 0 Match = 1 Improvement = 2 Transparent = 3",
"str self.warning_text = None # type: str self.order_id = 0 self.client_id = 0",
"the cancellation will occur at the close of the final trading day of",
"# type: Event[None] on_execution = Event() # type: Event[execution.Execution] def serialize(self, message: OutgoingMessage):",
"\"\" (Default), \"IB\", \"Away\", \"PTA\" (PostTrade) # ALGO ORDERS ONLY self.algo_strategy = \"\"",
"self.time_in_force = TimeInForce.GoodTillCancel # for GTC orders. self.active_start_time = None # type: datetime.datetime",
"# type: int self.scale_profit_offset = None # type: float self.scale_auto_reset = False self.scale_init_position",
"None # type: float self.volatility_type = None # type: VolatilityType self.delta_neutral_order_type = \"\"",
"str self.filled = None # type: float self.remaining = None # type: float",
"enum import typing # noqa from ib_async.errors import UnsupportedFeature from ib_async.event import Event",
"of these) message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease, self.pegged_change_amount, self.reference_change_amount, self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if self.conditions: # pragma: no",
"orders only self.stock_range_lower = None # type: float self.stock_range_upper = None # type:",
"= False self.reference_change_amount = 0.0 self.reference_exchange_id = \"\" self.adjusted_order_type = OrderType.Undefined self.trigger_price =",
"1=Average, 2 = BidOrAsk # COMBO ORDERS ONLY self.basis_points = None # type:",
"server version 26 # double lower = (protocol_version == 26 && isVolOrder) ?",
"consider using a Good-Til-Date/Time (GTD) or # Good-after-Time/Date (GAT) order type, or a",
"VOLATILITY ORDERS ONLY self.volatility = None # type: float self.volatility_type = None #",
"# type: float # pegged to stock and VOL orders only self.stock_range_lower =",
"exchange orders only self.auction_strategy = AuctionStrategy.Unset self.starting_price = None # type: float self.stock_ref_price",
"# type: str self.combo_legs_description = None # type: str self.inital_margin = None #",
"\"\" self.soft_dollar_tier_name = \"\" self.soft_dollar_tier_value = \"\" self.soft_dollar_tier_display_name = \"\" # native cash",
"float self.average_fill_price = None # type: float self.perm_id = None # type: int",
"in the marketplace until it executes # or is canceled. GTC orders will",
"# close of the market on the date specified GoodTillDate = \"GTD\" Opening",
"# type: float # EFP orders only self.basis_points_type = None # type: int",
"= \"TRAIL\" TrailingStopLimit = \"TRAIL LIMIT\" Volatility = 'VOL' class TimeInForce(str, enum.Enum): Day",
"EFP orders only self.basis_points_type = None # type: int # EFP orders only",
"limit price, also specify zero. self.limit_price = None # type: float # Generic",
"self.instrument.security_type != 'BAG' # not supported message.add(\"\") # deprecated sharesAllocation field message.add(self.discretionary_amount) message.add(self.good_after_time)",
"\"\" # IB account self.settling_firm = \"\" self.clearing_account = \"\" # True beneficiary",
"order does not execute as soon as it becomes available, the entire order",
"= Event() # type: Event[execution.Execution] def serialize(self, message: OutgoingMessage): message.add(self.order_id) message.add(self.instrument) if self.instrument.security_ids:",
"= 'SSHORT' SLONG = 'SLONG' class VolatilityType(enum.Enum): Daily = 1 Annual = 2",
"day only. # Good until canceled. The order will continue to work within",
"= False self.randomize_size = False # VOLATILITY ORDERS ONLY self.volatility = None #",
"= \"\" self.exempt_code = -1 # SMART routing only self.discretionary_amount = 0.0 self.etrade_only",
"message.add(self.designated_location) # # populate only when shortSaleSlot = 2. message.add(self.exempt_code) message.add(self.oca_type) message.add(self.rule80a) message.add(self.settling_firm)",
"Relative = 'REL' RelativeLimitCombo = 'REL + LMT' RelativeMarketCombo = 'REL + MKT'",
"date, consider using a Good-Til-Date/Time (GTD) or # Good-after-Time/Date (GAT) order type, or",
"self.reference_exchange_id = \"\" self.adjusted_order_type = OrderType.Undefined self.trigger_price = 1.7976931348623157e+308 # type: float self.limit_price_offset",
"is not filled as soon as it becomes available in the market is",
"message.add(self.hedge_type) if self.hedge_type: # pragma: no cover (I don't have actual examples of",
"self.scale_table = \"\" # # HEDGE ORDERS self.hedge_type = \"\" # 'D' -",
"= next(iter(self.instrument.security_ids.items())) else: security_id_type = security_id = None message.add(security_id_type) message.add(security_id) message.add(self.action) message.add(self.total_quantity) message.add(self.order_type)",
"examples of these) message.add(self.algo_parameters) message.add(self.algo_id) message.add(self.what_if) message.add(\"\".join(\"%s=%s;\" % (k, v) for (k, v)",
"type: str self.commission = None # type: float self.min_commission = None # type:",
"message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price) if self.delta_neutral_order_type: # pragma: no cover (I don't have actual examples",
"# Not Held self.not_held = False self.solicited = False self.model_code = \"\" self.order_miscellaneous_options",
"type: float self.mifid2_decision_maker = \"\" self.mifid2_decision_algo = \"\" self.mifid2_execution_trader = \"\" self.mifid2_execution_algo =",
"0 Firm = 1 Unknown = 2 class AuctionStrategy(enum.Enum): Unset = 0 Match",
"Action(str, enum.Enum): Buy = 'BUY' Sell = 'SELL' SShort = 'SSHORT' SLONG =",
"extended order fields self.time_in_force = TimeInForce.GoodTillCancel # for GTC orders. self.active_start_time = None",
"pair self.hedge_param = \"\" # 'beta=X' value for beta hedge, 'ratio=Y' for pair",
"message.add(self.override_percentage_constraints) # Volatility orders (srv v26 and above) message.add(self.volatility) message.add(self.volatility_type) message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price) if",
"# pragma: no cover (I don't have actual examples of these) message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm)",
"\"OPG\" # Use OPG to send a market-on-open (MOO) or limit-on-open (LOO) self",
"conditions self.adjusted_order_type, self.trigger_price, self.limit_price_offset, self.adjusted_stop_price, self.adjusted_stop_limit_price, self.adjusted_trailing_amount, self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value,",
"the following conditions: If a corporate # action on a security results in",
"close of the market on the date specified GoodTillDate = \"GTD\" Opening =",
"float self.order_ref = None # type: str self.combo_legs_description = None # type: str",
"== 26 && isVolOrder) ? DBL_MAX : selfstockRangeLower; # double upper = (protocol_version",
"# no conditions self.adjusted_order_type, self.trigger_price, self.limit_price_offset, self.adjusted_stop_price, self.adjusted_stop_limit_price, self.adjusted_trailing_amount, self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR)",
"cover (I don't have actual examples of these) message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease, self.pegged_change_amount, self.reference_change_amount, self.reference_exchange_id,",
"self.outside_regular_trading_hours = False self.hidden = False self.good_after_time = None # type: datetime.datetime self.good_till_date",
"= None # type: int # EFP orders only # SCALE ORDERS ONLY",
"float self.limit_price_offset = 1.7976931348623157e+308 # type: float self.adjusted_stop_price = 1.7976931348623157e+308 # type: float",
"self.firm_quote_only = True self.nbbo_price_cap = None # type: float self.opt_out_smart_routing = False #",
"message.add(self.delta_neutral_designated_location) message.add(self.continuous_update) message.add(self.reference_price_type) message.add(self.trail_stop_price) message.add(self.trailing_percent) # SCALE orders message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size) message.add(self.scale_price_increment) if self.scale_price_increment",
"the calendar # quarter following the current quarter. # Orders submitted to IB",
"float # EFP orders only self.basis_points_type = None # type: int # EFP",
"value for beta hedge, 'ratio=Y' for pair hedge # Clearing info self.account =",
"# # populate only when shortSaleSlot = 2. message.add(self.exempt_code) message.add(self.oca_type) message.add(self.rule80a) message.add(self.settling_firm) message.add(self.all_or_none)",
"= None # type: float self.override_percentage_constraints = False self.trail_stop_price = None # type:",
"or limit-on-open (LOO) self # If the entire Fill-or-Kill order does not execute",
"(I don't have actual examples of these) message.add(self.instrument.underlying_component) message.add(self.algo_strategy) if self.algo_strategy: # pragma:",
"in the marketplace until it executes or until the # close of the",
"= Event() # type: Event[None] on_execution = Event() # type: Event[execution.Execution] def serialize(self,",
"# type: float self.stock_range_upper = None # type: float self.randomize_price = False self.randomize_size",
"1.7976931348623157e+308 # type: float self.limit_price_offset = 1.7976931348623157e+308 # type: float self.adjusted_stop_price = 1.7976931348623157e+308",
"\"\" self.adjusted_order_type = OrderType.Undefined self.trigger_price = 1.7976931348623157e+308 # type: float self.limit_price_offset = 1.7976931348623157e+308",
"message.add(self.exempt_code) message.add(self.oca_type) message.add(self.rule80a) message.add(self.settling_firm) message.add(self.all_or_none) message.add(self.min_quantity) message.add(self.percent_offset) message.add(self.etrade_only) message.add(self.firm_quote_only) message.add(self.nbbo_price_cap) message.add(self.auction_strategy) # AUCTION_MATCH,",
"be canceled at the end of the first quarter of 2012. If the",
"# At the end of the calendar quarter following the current quarter. For",
"it becomes available, the entire order is canceled. FillOrKill = \"FOK\" DayTillCancel =",
"2 = BidOrAsk # COMBO ORDERS ONLY self.basis_points = None # type: float",
"type: typing.Dict[str, str] self.algo_id = \"\" # What-if self.what_if = False # Not",
"= False self.sweep_to_fill = False self.display_size = 0 self.trigger_method = 0 self.outside_regular_trading_hours =",
"self.account = \"\" # IB account self.settling_firm = \"\" self.clearing_account = \"\" #",
"= 0.0 self.is_pegged_change_amount_decrease = False self.reference_change_amount = 0.0 self.reference_exchange_id = \"\" self.adjusted_order_type =",
"of 2011 will be canceled at the end of the first quarter of",
"If a corporate # action on a security results in a stock split",
"\"\" self.delta_neutral_clearing_intent = \"\" self.delta_neutral_open_close = \"\" self.delta_neutral_short_sale = False self.delta_neutral_short_sale_slot = 0",
"not supported message.add(\"\") # deprecated sharesAllocation field message.add(self.discretionary_amount) message.add(self.good_after_time) message.add(self.good_till_date) message.add(self.fa_group) message.add(self.fa_method) message.add(self.fa_percentage)",
"= False self.hidden = False self.good_after_time = None # type: datetime.datetime self.good_till_date =",
"min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if self.conditions: # pragma: no cover (Not implemented) raise UnsupportedFeature(\"Order conditions\") message.add(0,",
"message.add(self.mifid2_execution_trader, self.mifid2_execution_algo, min_version=ProtocolVersion.MIFID_EXECUTION) def deserialize(self, message: IncomingMessage): assert False, \"Implemented in message handlers\"",
"examples of these) message.add(self.instrument.underlying_component) message.add(self.algo_strategy) if self.algo_strategy: # pragma: no cover (I don't",
"= 0 self.designated_location = \"\" self.exempt_code = -1 # SMART routing only self.discretionary_amount",
"message.add(self.limit_price) message.add(self.aux_price) message.add(self.time_in_force) message.add(self.oca_group) message.add(self.account) message.add(self.open_close) message.add(self.origin) message.add(self.order_reference) message.add(self.transmit) message.add(self.parent_id) message.add(self.block_order) message.add(self.sweep_to_fill) message.add(self.display_size)",
"fields self.time_in_force = TimeInForce.GoodTillCancel # for GTC orders. self.active_start_time = None # type:",
"system and in the marketplace until it executes # or is canceled. GTC",
"soon as it becomes available in the market is canceled. ImmediateOrCancel = \"IOC\"",
"message.add(self.randomize_price) if self.order_type == \"PEG BENCH\": # pragma: no cover (I don't have",
"canceled at the end of the first quarter of 2012. If the last",
"of that quarter. For example, if the last day # of the quarter",
"ONLY self.algo_strategy = \"\" self.algo_parameters = {} # type: typing.Dict[str, str] self.smart_combo_routing_params =",
"message.add(self.continuous_update) message.add(self.reference_price_type) message.add(self.trail_stop_price) message.add(self.trailing_percent) # SCALE orders message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size) message.add(self.scale_price_increment) if self.scale_price_increment and",
"<filename>ib_async/order.py import enum import typing # noqa from ib_async.errors import UnsupportedFeature from ib_async.event",
"days. # At the end of the calendar quarter following the current quarter.",
"def serialize(self, message: OutgoingMessage): message.add(self.order_id) message.add(self.instrument) if self.instrument.security_ids: security_id_type, security_id = next(iter(self.instrument.security_ids.items())) else:",
"ORDERS ONLY self.scale_init_level_size = None # type: int self.scale_subs_level_size = None # type:",
"import execution # noqa from ib_async.instrument import Instrument # noqa from ib_async.protocol import",
"canceled. FillOrKill = \"FOK\" DayTillCancel = \"DTC\" # Day until Canceled class Order(Serializable):",
"be automatically be cancelled under the following conditions: If a corporate # action",
"actual examples of these) message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset) message.add(self.scale_auto_reset) message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent) message.add(self.scale_table) message.add(self.active_start_time)",
"self.aux_price = None # type: float # extended order fields self.time_in_force = TimeInForce.GoodTillCancel",
"selfstockRangeLower; # double upper = (protocol_version == 26 && isVolOrder) ? DBL_MAX :",
"# type: float self.delta_neutral_contract_id = 0 self.delta_neutral_settling_firm = \"\" self.delta_neutral_clearing_account = \"\" self.delta_neutral_clearing_intent",
"= 'PEG MKT' PeggedToStock = 'PEG STK' PeggedToBenchmark = 'PEG BENCH' Relative =",
"PRT' PassiveRelative = 'PASSV REL' PeggedToMidpoint = 'PEG MID' PeggedToMarket = 'PEG MKT'",
"message.add(self.mifid2_decision_maker, self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader, self.mifid2_execution_algo, min_version=ProtocolVersion.MIFID_EXECUTION) def deserialize(self, message: IncomingMessage): assert False, \"Implemented",
"float self.scale_auto_reset = False self.scale_init_position = None # type: int self.scale_init_fill_quantity = None",
"no cover (I don't have actual examples of these) message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent)",
"To allow # adjustment to your order price on ex-dividend date, consider using",
"typing.List[None] # not suppored yet self.conditions_cancel_order = False self.conditions_ignore_regular_trading_hours = False self.ext_operator =",
"type: float # EFP orders only self.basis_points_type = None # type: int #",
"float self.remaining = None # type: float self.average_fill_price = None # type: float",
"field to contain the stop price for STP LMT orders, trailing amount, etc.",
"message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent) message.add(self.scale_table) message.add(self.active_start_time) message.add(self.active_stop_time) # HEDGE orders message.add(self.hedge_type) if self.hedge_type: # pragma:",
"entire Fill-or-Kill order does not execute as soon as it becomes available, the",
"self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if self.conditions: # pragma: no cover (Not implemented) raise UnsupportedFeature(\"Order conditions\")",
"{} # type: typing.Dict[str, str] self.reference_contract_id = 0 self.pegged_change_amount = 0.0 self.is_pegged_change_amount_decrease =",
"type: float self.remaining = None # type: float self.average_fill_price = None # type:",
"None # type: str self.inital_margin = None # type: str self.maintenance_margin = None",
"= None # type: float self.delta_neutral_contract_id = 0 self.delta_neutral_settling_firm = \"\" self.delta_neutral_clearing_account =",
"= \"\" # 'beta=X' value for beta hedge, 'ratio=Y' for pair hedge #",
"supported message.add(\"\") # deprecated sharesAllocation field message.add(self.discretionary_amount) message.add(self.good_after_time) message.add(self.good_till_date) message.add(self.fa_group) message.add(self.fa_method) message.add(self.fa_percentage) message.add(self.fa_profile)",
"ex-dividend date, consider using a Good-Til-Date/Time (GTD) or # Good-after-Time/Date (GAT) order type,",
"will not be reduced for dividends. To allow # adjustment to your order",
"(k, v) in self.order_miscellaneous_options.items())) message.add(self.solicited) message.add(self.randomize_size) message.add(self.randomize_price) if self.order_type == \"PEG BENCH\": #",
"current quarter. For example, an order placed during the third # quarter of",
"float self.scale_price_adjust_value = None # type: float self.scale_price_adjust_interval = None # type: int",
"(forward or reverse), exchange for shares, or distribution of # shares. If you",
"Used for limit, stop-limit and relative orders. In all other cases specify zero.",
"price on ex-dividend date, consider using a Good-Til-Date/Time (GTD) or # Good-after-Time/Date (GAT)",
"message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size) message.add(self.scale_price_increment) if self.scale_price_increment and self.scale_price_increment > 0.0: # pragma: no cover",
"limit-on-open (LOO) self # If the entire Fill-or-Kill order does not execute as",
"on the preceding Friday. # Orders that are modified will be assigned a",
"'MIT' MarketOnClose = 'MOC' MarketToLimit = 'MTL' MarketWithProtection = 'MKT PRT' PassiveRelative =",
": selfstockRangeUpper; message.add(self.stock_range_lower) message.add(self.stock_range_upper) message.add(self.override_percentage_constraints) # Volatility orders (srv v26 and above) message.add(self.volatility)",
"\"\" # 'D' - delta, 'B' - beta, 'F' - FX, 'P' -",
"self.adjusted_trailing_amount, self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker, self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER)",
"version 26 # double lower = (protocol_version == 26 && isVolOrder) ? DBL_MAX",
"\"O\" self.origin = OrderOrigin.Customer self.short_sale_slot = 0 self.designated_location = \"\" self.exempt_code = -1",
"\"\" # native cash quantity self.cash_quantity = 1.7976931348623157e+308 # type: float self.mifid2_decision_maker =",
"message.add(self.reference_price_type) message.add(self.trail_stop_price) message.add(self.trailing_percent) # SCALE orders message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size) message.add(self.scale_price_increment) if self.scale_price_increment and self.scale_price_increment",
"message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker, self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader, self.mifid2_execution_algo, min_version=ProtocolVersion.MIFID_EXECUTION) def deserialize(self, message: IncomingMessage): assert",
"Good-after-Time/Date (GAT) order type, or a combination of the two. GoodTillCancel = \"GTC\"",
"examples of these) message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location) message.add(self.continuous_update) message.add(self.reference_price_type)",
"Customer = 0 Firm = 1 Unknown = 2 class AuctionStrategy(enum.Enum): Unset =",
"type: typing.List[None] # not suppored yet self.conditions_cancel_order = False self.conditions_ignore_regular_trading_hours = False self.ext_operator",
"type: float # financial advisors only self.fa_group = \"\" self.fa_profile = \"\" self.fa_method",
"If the entire Fill-or-Kill order does not execute as soon as it becomes",
"\"\" self.fa_profile = \"\" self.fa_method = \"\" self.fa_percentage = \"\" # institutional (ie",
"# 1=Average, 2 = BidOrAsk # COMBO ORDERS ONLY self.basis_points = None #",
"message.add(self.volatility_type) message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price) if self.delta_neutral_order_type: # pragma: no cover (I don't have actual",
"None # type: str self.filled = None # type: float self.remaining = None",
"\"\" self.delta_neutral_clearing_account = \"\" self.delta_neutral_clearing_intent = \"\" self.delta_neutral_open_close = \"\" self.delta_neutral_short_sale = False",
"a new “Auto Expire” date consistent with the end of the calendar #",
"class Action(str, enum.Enum): Buy = 'BUY' Sell = 'SELL' SShort = 'SSHORT' SLONG",
"for retail, 1 or 2 for institutions message.add(self.designated_location) # # populate only when",
"'B' - beta, 'F' - FX, 'P' - pair self.hedge_param = \"\" #",
"preceding Friday. # Orders that are modified will be assigned a new “Auto",
"OrderType.Undefined self.trigger_price = 1.7976931348623157e+308 # type: float self.limit_price_offset = 1.7976931348623157e+308 # type: float",
"= \"\" # # HEDGE ORDERS self.hedge_type = \"\" # 'D' - delta,",
"typing # noqa from ib_async.errors import UnsupportedFeature from ib_async.event import Event from ib_async",
"quarter. # Orders submitted to IB that remain in force for more than",
"within the system and in the marketplace until it executes or until the",
"str self.maintenance_margin = None # type: str self.equity_with_loan = None # type: str",
"= \"\" # What-if self.what_if = False # Not Held self.not_held = False",
"PeggedToMidpoint = 'PEG MID' PeggedToMarket = 'PEG MKT' PeggedToStock = 'PEG STK' PeggedToBenchmark",
"in force for more than one day will not be reduced for dividends.",
"the orders will be cancelled on the preceding Friday. # Orders that are",
"= \"DAY\" # Valid for the day only. # Good until canceled. The",
"Improvement = 2 Transparent = 3 class Action(str, enum.Enum): Buy = 'BUY' Sell",
"None # type: VolatilityType self.delta_neutral_order_type = \"\" self.delta_neutral_aux_price = None # type: float",
"cancelled under the following conditions: If a corporate # action on a security",
"enum.Enum): Day = \"DAY\" # Valid for the day only. # Good until",
"amount, etc. self.aux_price = None # type: float # extended order fields self.time_in_force",
"cover (I don't have actual examples of these) message.add(self.algo_parameters) message.add(self.algo_id) message.add(self.what_if) message.add(\"\".join(\"%s=%s;\" %",
"price for STP LMT orders, trailing amount, etc. self.aux_price = None # type:",
"self.order_miscellaneous_options.items())) message.add(self.solicited) message.add(self.randomize_size) message.add(self.randomize_price) if self.order_type == \"PEG BENCH\": # pragma: no cover",
"typing.Dict[str, str] self.reference_contract_id = 0 self.pegged_change_amount = 0.0 self.is_pegged_change_amount_decrease = False self.reference_change_amount =",
"None # type: float self.perm_id = None # type: int self.parent_id = None",
"type: typing.Dict[str, str] self.smart_combo_routing_params = {} # type: typing.Dict[str, str] self.algo_id = \"\"",
"BidOrAsk # COMBO ORDERS ONLY self.basis_points = None # type: float # EFP",
"= 0 Match = 1 Improvement = 2 Transparent = 3 class Action(str,",
"0 self.client_id = 0 self.perm_id = 0 # main order fields self.action =",
"2 class AuctionStrategy(enum.Enum): Unset = 0 Match = 1 Improvement = 2 Transparent",
"AuctionStrategy.Unset self.starting_price = None # type: float self.stock_ref_price = None # type: float",
"= 'PEG STK' PeggedToBenchmark = 'PEG BENCH' Relative = 'REL' RelativeLimitCombo = 'REL",
"False self.solicited = False self.model_code = \"\" self.order_miscellaneous_options = {} # type: typing.Dict[str,",
"zero. For # relative orders with no limit price, also specify zero. self.limit_price",
"self.limit_price_offset = 1.7976931348623157e+308 # type: float self.adjusted_stop_price = 1.7976931348623157e+308 # type: float self.adjusted_stop_limit_price",
"type: typing.Dict[str, str] self.reference_contract_id = 0 self.pegged_change_amount = 0.0 self.is_pegged_change_amount_decrease = False self.reference_change_amount",
"GTC orders will be automatically be cancelled under the following conditions: If a",
"remain working within the system and in the marketplace until it executes or",
"= False self.solicited = False self.model_code = \"\" self.order_miscellaneous_options = {} # type:",
"DayTillCancel = \"DTC\" # Day until Canceled class Order(Serializable): def __init__(self, parent: ProtocolInterface)",
"Transparent = 3 class Action(str, enum.Enum): Buy = 'BUY' Sell = 'SELL' SShort",
"pragma: no cover (I don't have actual examples of these) message.add(self.hedge_param) message.add(self.opt_out_smart_routing) message.add(self.clearing_account)",
"self.all_or_none = False self.min_quantity = None # type: int self.percent_offset = None #",
"= True self.nbbo_price_cap = None # type: float self.opt_out_smart_routing = False # BOX",
"type: int self.percent_offset = None # type: float self.override_percentage_constraints = False self.trail_stop_price =",
"type: float self.stock_ref_price = None # type: float self.delta = None # type:",
"self.combo_legs_description = None # type: str self.inital_margin = None # type: str self.maintenance_margin",
"will continue to work within the system and in the marketplace until it",
"self.inital_margin = None # type: str self.maintenance_margin = None # type: str self.equity_with_loan",
"self.opt_out_smart_routing = False # BOX exchange orders only self.auction_strategy = AuctionStrategy.Unset self.starting_price =",
"self.auction_strategy = AuctionStrategy.Unset self.starting_price = None # type: float self.stock_ref_price = None #",
"Event[execution.Execution] def serialize(self, message: OutgoingMessage): message.add(self.order_id) message.add(self.instrument) if self.instrument.security_ids: security_id_type, security_id = next(iter(self.instrument.security_ids.items()))",
"message.add(self.opt_out_smart_routing) message.add(self.clearing_account) message.add(self.clearing_intent) message.add(self.not_held) message.add(bool(self.instrument.underlying_component)) if self.instrument.underlying_component: # pragma: no cover (I don't",
"self.average_fill_price = None # type: float self.perm_id = None # type: int self.parent_id",
"of the calendar # quarter following the current quarter. # Orders submitted to",
"send a market-on-open (MOO) or limit-on-open (LOO) self # If the entire Fill-or-Kill",
"# What-if self.what_if = False # Not Held self.not_held = False self.solicited =",
"isVolOrder) ? DBL_MAX : selfstockRangeUpper; message.add(self.stock_range_lower) message.add(self.stock_range_upper) message.add(self.override_percentage_constraints) # Volatility orders (srv v26",
"the entire Fill-or-Kill order does not execute as soon as it becomes available,",
"type, or a combination of the two. GoodTillCancel = \"GTC\" # Immediate or",
"float # pegged to stock and VOL orders only self.stock_range_lower = None #",
"The LIMIT price. Used for limit, stop-limit and relative orders. In all other",
"yet self.conditions_cancel_order = False self.conditions_ignore_regular_trading_hours = False self.ext_operator = \"\" self.soft_dollar_tier_name = \"\"",
"self.rule80a = \"\" self.all_or_none = False self.min_quantity = None # type: int self.percent_offset",
"None message.add(security_id_type) message.add(security_id) message.add(self.action) message.add(self.total_quantity) message.add(self.order_type) message.add(self.limit_price) message.add(self.aux_price) message.add(self.time_in_force) message.add(self.oca_group) message.add(self.account) message.add(self.open_close) message.add(self.origin)",
"message.add(self.solicited) message.add(self.randomize_size) message.add(self.randomize_price) if self.order_type == \"PEG BENCH\": # pragma: no cover (I",
"shares. If you do not log into your IB account for 90 days.",
"message.add(self.scale_profit_offset) message.add(self.scale_auto_reset) message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent) message.add(self.scale_table) message.add(self.active_start_time) message.add(self.active_stop_time) # HEDGE orders message.add(self.hedge_type) if",
"type: float self.max_commission = None # type: float self.commission_currency = None # type:",
"type: float self.override_percentage_constraints = False self.trail_stop_price = None # type: float self.trailing_percent =",
"# type: float self.adjusted_trailing_amount = 1.7976931348623157e+308 # type: float self.adjustable_trailing_unit = 0 self.conditions",
"= 'LIT' LimitOnClose = 'LOC' Market = 'MKT' MarketIfTouched = 'MIT' MarketOnClose =",
"will be automatically be cancelled under the following conditions: If a corporate #",
"type: float self.adjusted_stop_limit_price = 1.7976931348623157e+308 # type: float self.adjusted_trailing_amount = 1.7976931348623157e+308 # type:",
"= \"\" self.delta_neutral_clearing_intent = \"\" self.delta_neutral_open_close = \"\" self.delta_neutral_short_sale = False self.delta_neutral_short_sale_slot =",
"False self.ext_operator = \"\" self.soft_dollar_tier_name = \"\" self.soft_dollar_tier_value = \"\" self.soft_dollar_tier_display_name = \"\"",
"actual examples of these) message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location) message.add(self.continuous_update)",
"\"STP\" StopLimit = \"STP LMT\" StopWithProtection = \"STP PRT\" TrailingStop = \"TRAIL\" TrailingStopLimit",
"None # type: float self.opt_out_smart_routing = False # BOX exchange orders only self.auction_strategy",
"# type: float self.min_commission = None # type: float self.max_commission = None #",
"for shares, or distribution of # shares. If you do not log into",
"type: str self.warning_text = None # type: str self.order_id = 0 self.client_id =",
"message.add(self.what_if) message.add(\"\".join(\"%s=%s;\" % (k, v) for (k, v) in self.order_miscellaneous_options.items())) message.add(self.solicited) message.add(self.randomize_size) message.add(self.randomize_price)",
"stock and VOL orders only self.stock_range_lower = None # type: float self.stock_range_upper =",
"order placed during the third # quarter of 2011 will be canceled at",
"datetime.datetime self.rule80a = \"\" self.all_or_none = False self.min_quantity = None # type: int",
"message.add(self.delta_neutral_aux_price) if self.delta_neutral_order_type: # pragma: no cover (I don't have actual examples of",
"the close of the final trading day of that quarter. For example, if",
"only # SCALE ORDERS ONLY self.scale_init_level_size = None # type: int self.scale_subs_level_size =",
"order price on ex-dividend date, consider using a Good-Til-Date/Time (GTD) or # Good-after-Time/Date",
"= \"\" BoxTop = 'BOX TOP' Limit = 'LMT' LimitIfTouched = 'LIT' LimitOnClose",
"for more than one day will not be reduced for dividends. To allow",
"delta, 'B' - beta, 'F' - FX, 'P' - pair self.hedge_param = \"\"",
"LMT orders, trailing amount, etc. self.aux_price = None # type: float # extended",
"\"FOK\" DayTillCancel = \"DTC\" # Day until Canceled class Order(Serializable): def __init__(self, parent:",
"a Good-Til-Date/Time (GTD) or # Good-after-Time/Date (GAT) order type, or a combination of",
"= None # type: datetime.datetime self.active_stop_time = None # type: datetime.datetime self.oca_group =",
"quarter following the current quarter. # Orders submitted to IB that remain in",
"float # Generic field to contain the stop price for STP LMT orders,",
"message.add(self.percent_offset) message.add(self.etrade_only) message.add(self.firm_quote_only) message.add(self.nbbo_price_cap) message.add(self.auction_strategy) # AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT message.add(self.starting_price) message.add(self.stock_ref_price) message.add(self.delta) #",
"= (protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeLower; # double upper",
"Use OPG to send a market-on-open (MOO) or limit-on-open (LOO) self # If",
"# AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT message.add(self.starting_price) message.add(self.stock_ref_price) message.add(self.delta) # Volatility orders had specific watermark",
"class TimeInForce(str, enum.Enum): Day = \"DAY\" # Valid for the day only. #",
"routing only self.discretionary_amount = 0.0 self.etrade_only = True self.firm_quote_only = True self.nbbo_price_cap =",
"= parent self.instrument = None # type: Instrument # Filled by status messages",
"status messages self.status = None # type: str self.filled = None # type:",
"self.delta_neutral_settling_firm = \"\" self.delta_neutral_clearing_account = \"\" self.delta_neutral_clearing_intent = \"\" self.delta_neutral_open_close = \"\" self.delta_neutral_short_sale",
"# pragma: no cover (I don't have actual examples of these) message.add(self.instrument.underlying_component) message.add(self.algo_strategy)",
"it executes # or is canceled. GTC orders will be automatically be cancelled",
"# ALGO ORDERS ONLY self.algo_strategy = \"\" self.algo_parameters = {} # type: typing.Dict[str,",
"orders, trailing amount, etc. self.aux_price = None # type: float # extended order",
"ib_async.event import Event from ib_async import execution # noqa from ib_async.instrument import Instrument",
"message.add(self.parent_id) message.add(self.block_order) message.add(self.sweep_to_fill) message.add(self.display_size) message.add(self.trigger_method) message.add(self.outside_regular_trading_hours) message.add(self.hidden) assert self.instrument.security_type != 'BAG' # not",
"# SMART routing only self.discretionary_amount = 0.0 self.etrade_only = True self.firm_quote_only = True",
"type: float self.opt_out_smart_routing = False # BOX exchange orders only self.auction_strategy = AuctionStrategy.Unset",
"\"DTC\" # Day until Canceled class Order(Serializable): def __init__(self, parent: ProtocolInterface) -> None:",
"(GAT) order type, or a combination of the two. GoodTillCancel = \"GTC\" #",
"# type: str self.warning_text = None # type: str self.order_id = 0 self.client_id",
"to your order price on ex-dividend date, consider using a Good-Til-Date/Time (GTD) or",
"stock split (forward or reverse), exchange for shares, or distribution of # shares.",
"= False # VOLATILITY ORDERS ONLY self.volatility = None # type: float self.volatility_type",
"orders only self.auction_strategy = AuctionStrategy.Unset self.starting_price = None # type: float self.stock_ref_price =",
"'MOC' MarketToLimit = 'MTL' MarketWithProtection = 'MKT PRT' PassiveRelative = 'PASSV REL' PeggedToMidpoint",
"float self.override_percentage_constraints = False self.trail_stop_price = None # type: float self.trailing_percent = None",
"message.add(self.starting_price) message.add(self.stock_ref_price) message.add(self.delta) # Volatility orders had specific watermark price attribs in server",
"import UnsupportedFeature from ib_async.event import Event from ib_async import execution # noqa from",
"deprecated sharesAllocation field message.add(self.discretionary_amount) message.add(self.good_after_time) message.add(self.good_till_date) message.add(self.fa_group) message.add(self.fa_method) message.add(self.fa_percentage) message.add(self.fa_profile) message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT) #",
"None # type: str self.commission = None # type: float self.min_commission = None",
"= 1.7976931348623157e+308 # type: float self.mifid2_decision_maker = \"\" self.mifid2_decision_algo = \"\" self.mifid2_execution_trader =",
"self.client_id = 0 self.perm_id = 0 # main order fields self.action = None",
"= None # type: float # financial advisors only self.fa_group = \"\" self.fa_profile",
"= None # type: int self.why_held = None # type: str self.market_cap_price =",
"int self.percent_offset = None # type: float self.override_percentage_constraints = False self.trail_stop_price = None",
"beneficiary of the order self.clearing_intent = \"\" # \"\" (Default), \"IB\", \"Away\", \"PTA\"",
"# HEDGE ORDERS self.hedge_type = \"\" # 'D' - delta, 'B' - beta,",
"message: OutgoingMessage): message.add(self.order_id) message.add(self.instrument) if self.instrument.security_ids: security_id_type, security_id = next(iter(self.instrument.security_ids.items())) else: security_id_type =",
"= \"\" self.mifid2_execution_trader = \"\" self.mifid2_execution_algo = \"\" updated = Event() # type:",
"# Clearing info self.account = \"\" # IB account self.settling_firm = \"\" self.clearing_account",
"False self.delta_neutral_short_sale_slot = 0 self.delta_neutral_designated_location = \"\" self.continuous_update = False self.reference_price_type = None",
"self.maintenance_margin = None # type: str self.equity_with_loan = None # type: str self.commission",
"the first quarter of 2012. If the last day is a non-trading day,",
"of 2012. If the last day is a non-trading day, # the cancellation",
"selfstockRangeUpper; message.add(self.stock_range_lower) message.add(self.stock_range_upper) message.add(self.override_percentage_constraints) # Volatility orders (srv v26 and above) message.add(self.volatility) message.add(self.volatility_type)",
"None # type: float self.delta = None # type: float # pegged to",
"is canceled. ImmediateOrCancel = \"IOC\" # Good until Date. It will remain working",
"# 'beta=X' value for beta hedge, 'ratio=Y' for pair hedge # Clearing info",
"self.pegged_change_amount = 0.0 self.is_pegged_change_amount_decrease = False self.reference_change_amount = 0.0 self.reference_exchange_id = \"\" self.adjusted_order_type",
"non-cleared) only self.open_close = \"O\" self.origin = OrderOrigin.Customer self.short_sale_slot = 0 self.designated_location =",
"typing.Dict[str, str] self.smart_combo_routing_params = {} # type: typing.Dict[str, str] self.algo_id = \"\" #",
"self.delta_neutral_contract_id = 0 self.delta_neutral_settling_firm = \"\" self.delta_neutral_clearing_account = \"\" self.delta_neutral_clearing_intent = \"\" self.delta_neutral_open_close",
"these) message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease, self.pegged_change_amount, self.reference_change_amount, self.reference_exchange_id, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) if self.conditions: # pragma: no cover",
"self.order_id = 0 self.client_id = 0 self.perm_id = 0 # main order fields",
"= False self.reference_price_type = None # type: int # 1=Average, 2 = BidOrAsk",
"self.parent_id = None # type: int self.last_fill_price = None # type: float self.client_id",
"MKT' PeggedToStock = 'PEG STK' PeggedToBenchmark = 'PEG BENCH' Relative = 'REL' RelativeLimitCombo",
"int self.scale_subs_level_size = None # type: int self.scale_price_increment = None # type: float",
"= 1 Improvement = 2 Transparent = 3 class Action(str, enum.Enum): Buy =",
"= None # type: Instrument # Filled by status messages self.status = None",
"self.delta_neutral_designated_location = \"\" self.continuous_update = False self.reference_price_type = None # type: int #",
"None # type: float # financial advisors only self.fa_group = \"\" self.fa_profile =",
"beta, 'F' - FX, 'P' - pair self.hedge_param = \"\" # 'beta=X' value",
"= {} # type: typing.Dict[str, str] self.algo_id = \"\" # What-if self.what_if =",
"1.7976931348623157e+308 # type: float self.adjusted_trailing_amount = 1.7976931348623157e+308 # type: float self.adjustable_trailing_unit = 0",
"True self.nbbo_price_cap = None # type: float self.opt_out_smart_routing = False # BOX exchange",
"self.scale_init_position = None # type: int self.scale_init_fill_quantity = None # type: int self.scale_random_percent",
"to send a market-on-open (MOO) or limit-on-open (LOO) self # If the entire",
"= 'PEG MID' PeggedToMarket = 'PEG MKT' PeggedToStock = 'PEG STK' PeggedToBenchmark =",
"# pragma: no cover (I don't have actual examples of these) message.add(self.reference_contract_id, self.is_pegged_change_amount_decrease,",
"import ProtocolInterface, Serializable, ProtocolVersion, IncomingMessage, OutgoingMessage class OrderOrigin(enum.Enum): Customer = 0 Firm =",
"self.hidden = False self.good_after_time = None # type: datetime.datetime self.good_till_date = None #",
"the day only. # Good until canceled. The order will continue to work",
"ONLY self.volatility = None # type: float self.volatility_type = None # type: VolatilityType",
"datetime.datetime self.oca_group = \"\" self.oca_type = 0 self.order_reference = \"\" self.transmit = True",
"from ib_async.errors import UnsupportedFeature from ib_async.event import Event from ib_async import execution #",
"None # type: str self.warning_text = None # type: str self.order_id = 0",
"raise UnsupportedFeature(\"Order conditions\") message.add(0, # no conditions self.adjusted_order_type, self.trigger_price, self.limit_price_offset, self.adjusted_stop_price, self.adjusted_stop_limit_price, self.adjusted_trailing_amount,",
"split (forward or reverse), exchange for shares, or distribution of # shares. If",
"= False self.conditions_ignore_regular_trading_hours = False self.ext_operator = \"\" self.soft_dollar_tier_name = \"\" self.soft_dollar_tier_value =",
"= None # type: float # extended order fields self.time_in_force = TimeInForce.GoodTillCancel #",
"LIMIT price. Used for limit, stop-limit and relative orders. In all other cases",
"message.add(security_id_type) message.add(security_id) message.add(self.action) message.add(self.total_quantity) message.add(self.order_type) message.add(self.limit_price) message.add(self.aux_price) message.add(self.time_in_force) message.add(self.oca_group) message.add(self.account) message.add(self.open_close) message.add(self.origin) message.add(self.order_reference)",
"= \"IOC\" # Good until Date. It will remain working within the system",
"don't have actual examples of these) message.add(self.algo_parameters) message.add(self.algo_id) message.add(self.what_if) message.add(\"\".join(\"%s=%s;\" % (k, v)",
"self.filled = None # type: float self.remaining = None # type: float self.average_fill_price",
"if self.delta_neutral_order_type: # pragma: no cover (I don't have actual examples of these)",
"None # type: float # extended order fields self.time_in_force = TimeInForce.GoodTillCancel # for",
"have actual examples of these) message.add(self.hedge_param) message.add(self.opt_out_smart_routing) message.add(self.clearing_account) message.add(self.clearing_intent) message.add(self.not_held) message.add(bool(self.instrument.underlying_component)) if self.instrument.underlying_component:",
"== 26 && isVolOrder) ? DBL_MAX : selfstockRangeUpper; message.add(self.stock_range_lower) message.add(self.stock_range_upper) message.add(self.override_percentage_constraints) # Volatility",
"system and in the marketplace until it executes or until the # close",
"available, the entire order is canceled. FillOrKill = \"FOK\" DayTillCancel = \"DTC\" #",
"type: int self.scale_price_increment = None # type: float self.scale_price_adjust_value = None # type:",
"PassiveRelative = 'PASSV REL' PeggedToMidpoint = 'PEG MID' PeggedToMarket = 'PEG MKT' PeggedToStock",
"as soon as it becomes available in the market is canceled. ImmediateOrCancel =",
"Volatility orders had specific watermark price attribs in server version 26 # double",
"placed during the third # quarter of 2011 will be canceled at the",
"False self.sweep_to_fill = False self.display_size = 0 self.trigger_method = 0 self.outside_regular_trading_hours = False",
"MarketOnClose = 'MOC' MarketToLimit = 'MTL' MarketWithProtection = 'MKT PRT' PassiveRelative = 'PASSV",
"type: float self.adjusted_trailing_amount = 1.7976931348623157e+308 # type: float self.adjustable_trailing_unit = 0 self.conditions =",
"(srv v26 and above) message.add(self.volatility) message.add(self.volatility_type) message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price) if self.delta_neutral_order_type: # pragma: no",
"2. message.add(self.exempt_code) message.add(self.oca_type) message.add(self.rule80a) message.add(self.settling_firm) message.add(self.all_or_none) message.add(self.min_quantity) message.add(self.percent_offset) message.add(self.etrade_only) message.add(self.firm_quote_only) message.add(self.nbbo_price_cap) message.add(self.auction_strategy) #",
"the final trading day of that quarter. For example, if the last day",
"and self.scale_price_increment > 0.0: # pragma: no cover (I don't have actual examples",
"pragma: no cover (I don't have actual examples of these) message.add(self.algo_parameters) message.add(self.algo_id) message.add(self.what_if)",
"= 0.0 self.etrade_only = True self.firm_quote_only = True self.nbbo_price_cap = None # type:",
"native cash quantity self.cash_quantity = 1.7976931348623157e+308 # type: float self.mifid2_decision_maker = \"\" self.mifid2_decision_algo",
"= 'REL' RelativeLimitCombo = 'REL + LMT' RelativeMarketCombo = 'REL + MKT' Stop",
"0.0: # pragma: no cover (I don't have actual examples of these) message.add(self.scale_price_adjust_value)",
"float # extended order fields self.time_in_force = TimeInForce.GoodTillCancel # for GTC orders. self.active_start_time",
"Good-Til-Date/Time (GTD) or # Good-after-Time/Date (GAT) order type, or a combination of the",
"# pegged to stock and VOL orders only self.stock_range_lower = None # type:",
"# BOX exchange orders only self.auction_strategy = AuctionStrategy.Unset self.starting_price = None # type:",
"self.delta_neutral_short_sale = False self.delta_neutral_short_sale_slot = 0 self.delta_neutral_designated_location = \"\" self.continuous_update = False self.reference_price_type",
"= OrderType.Market # The LIMIT price. Used for limit, stop-limit and relative orders.",
"{} # type: typing.Dict[str, str] self.algo_id = \"\" # What-if self.what_if = False",
"from ib_async.protocol import ProtocolInterface, Serializable, ProtocolVersion, IncomingMessage, OutgoingMessage class OrderOrigin(enum.Enum): Customer = 0",
"for 90 days. # At the end of the calendar quarter following the",
"= None # type: float self.order_ref = None # type: str self.combo_legs_description =",
"= None # type: int self.percent_offset = None # type: float self.override_percentage_constraints =",
"str self.order_id = 0 self.client_id = 0 self.perm_id = 0 # main order",
"= None # type: float # Generic field to contain the stop price",
"self.oca_type = 0 self.order_reference = \"\" self.transmit = True self.parent_id = 0 self.block_order",
"= \"\" self.delta_neutral_aux_price = None # type: float self.delta_neutral_contract_id = 0 self.delta_neutral_settling_firm =",
"Limit = 'LMT' LimitIfTouched = 'LIT' LimitOnClose = 'LOC' Market = 'MKT' MarketIfTouched",
"\"\" self.soft_dollar_tier_display_name = \"\" # native cash quantity self.cash_quantity = 1.7976931348623157e+308 # type:",
"self.volatility = None # type: float self.volatility_type = None # type: VolatilityType self.delta_neutral_order_type",
"# Filled by status messages self.status = None # type: str self.filled =",
"for limit, stop-limit and relative orders. In all other cases specify zero. For",
"str] self.algo_id = \"\" # What-if self.what_if = False # Not Held self.not_held",
"v18 and above) message.add(self.short_sale_slot) # # 0 for retail, 1 or 2 for",
"that remain in force for more than one day will not be reduced",
"= 2 class OrderType(str, enum.Enum): Undefined = \"\" BoxTop = 'BOX TOP' Limit",
"else: security_id_type = security_id = None message.add(security_id_type) message.add(security_id) message.add(self.action) message.add(self.total_quantity) message.add(self.order_type) message.add(self.limit_price) message.add(self.aux_price)",
"the current quarter. # Orders submitted to IB that remain in force for",
"message.add(self.good_till_date) message.add(self.fa_group) message.add(self.fa_method) message.add(self.fa_percentage) message.add(self.fa_profile) message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT) # institutional short saleslot data (srv",
"not execute as soon as it becomes available, the entire order is canceled.",
"= 'VOL' class TimeInForce(str, enum.Enum): Day = \"DAY\" # Valid for the day",
"in a stock split (forward or reverse), exchange for shares, or distribution of",
"'SSHORT' SLONG = 'SLONG' class VolatilityType(enum.Enum): Daily = 1 Annual = 2 class",
"self.trigger_price, self.limit_price_offset, self.adjusted_stop_price, self.adjusted_stop_limit_price, self.adjusted_trailing_amount, self.adjustable_trailing_unit, min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity,",
"that quarter. For example, if the last day # of the quarter is",
"'BAG' # not supported message.add(\"\") # deprecated sharesAllocation field message.add(self.discretionary_amount) message.add(self.good_after_time) message.add(self.good_till_date) message.add(self.fa_group)",
"Not Held self.not_held = False self.solicited = False self.model_code = \"\" self.order_miscellaneous_options =",
"None # type: datetime.datetime self.oca_group = \"\" self.oca_type = 0 self.order_reference = \"\"",
"quantity self.cash_quantity = 1.7976931348623157e+308 # type: float self.mifid2_decision_maker = \"\" self.mifid2_decision_algo = \"\"",
"? DBL_MAX : selfstockRangeUpper; message.add(self.stock_range_lower) message.add(self.stock_range_upper) message.add(self.override_percentage_constraints) # Volatility orders (srv v26 and",
"message.add(\"\") # deprecated sharesAllocation field message.add(self.discretionary_amount) message.add(self.good_after_time) message.add(self.good_till_date) message.add(self.fa_group) message.add(self.fa_method) message.add(self.fa_percentage) message.add(self.fa_profile) message.add(self.model_code,",
"for dividends. To allow # adjustment to your order price on ex-dividend date,",
"security_id = None message.add(security_id_type) message.add(security_id) message.add(self.action) message.add(self.total_quantity) message.add(self.order_type) message.add(self.limit_price) message.add(self.aux_price) message.add(self.time_in_force) message.add(self.oca_group) message.add(self.account)",
"3 class Action(str, enum.Enum): Buy = 'BUY' Sell = 'SELL' SShort = 'SSHORT'",
"self.why_held = None # type: str self.market_cap_price = None # type: float self.order_ref",
"under the following conditions: If a corporate # action on a security results",
"message.add(\"\".join(\"%s=%s;\" % (k, v) for (k, v) in self.order_miscellaneous_options.items())) message.add(self.solicited) message.add(self.randomize_size) message.add(self.randomize_price) if",
"populate only when shortSaleSlot = 2. message.add(self.exempt_code) message.add(self.oca_type) message.add(self.rule80a) message.add(self.settling_firm) message.add(self.all_or_none) message.add(self.min_quantity) message.add(self.percent_offset)",
"\"\" self.all_or_none = False self.min_quantity = None # type: int self.percent_offset = None",
"Generic field to contain the stop price for STP LMT orders, trailing amount,",
"type: datetime.datetime self.oca_group = \"\" self.oca_type = 0 self.order_reference = \"\" self.transmit =",
"message.add(self.time_in_force) message.add(self.oca_group) message.add(self.account) message.add(self.open_close) message.add(self.origin) message.add(self.order_reference) message.add(self.transmit) message.add(self.parent_id) message.add(self.block_order) message.add(self.sweep_to_fill) message.add(self.display_size) message.add(self.trigger_method) message.add(self.outside_regular_trading_hours)",
"market-on-open (MOO) or limit-on-open (LOO) self # If the entire Fill-or-Kill order does",
"self.not_held = False self.solicited = False self.model_code = \"\" self.order_miscellaneous_options = {} #",
"DBL_MAX : selfstockRangeLower; # double upper = (protocol_version == 26 && isVolOrder) ?",
"= \"\" self.fa_method = \"\" self.fa_percentage = \"\" # institutional (ie non-cleared) only",
"type: Instrument # Filled by status messages self.status = None # type: str",
"# relative orders with no limit price, also specify zero. self.limit_price = None",
"IB account self.settling_firm = \"\" self.clearing_account = \"\" # True beneficiary of the",
"or # Good-after-Time/Date (GAT) order type, or a combination of the two. GoodTillCancel",
"main order fields self.action = None # type: Action self.total_quantity = 0.0 self.order_type",
"attribs in server version 26 # double lower = (protocol_version == 26 &&",
"RelativeLimitCombo = 'REL + LMT' RelativeMarketCombo = 'REL + MKT' Stop = \"STP\"",
"Stop = \"STP\" StopLimit = \"STP LMT\" StopWithProtection = \"STP PRT\" TrailingStop =",
"self.is_pegged_change_amount_decrease = False self.reference_change_amount = 0.0 self.reference_exchange_id = \"\" self.adjusted_order_type = OrderType.Undefined self.trigger_price",
"on the date specified GoodTillDate = \"GTD\" Opening = \"OPG\" # Use OPG",
"self.min_quantity = None # type: int self.percent_offset = None # type: float self.override_percentage_constraints",
"SCALE orders message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size) message.add(self.scale_price_increment) if self.scale_price_increment and self.scale_price_increment > 0.0: # pragma:",
"= None # type: str self.order_id = 0 self.client_id = 0 self.perm_id =",
"message.add(self.not_held) message.add(bool(self.instrument.underlying_component)) if self.instrument.underlying_component: # pragma: no cover (I don't have actual examples",
"float self.max_commission = None # type: float self.commission_currency = None # type: str",
"# type: int self.scale_init_fill_quantity = None # type: int self.scale_random_percent = False self.scale_table",
"str self.commission = None # type: float self.min_commission = None # type: float",
"= {} # type: typing.Dict[str, str] self.smart_combo_routing_params = {} # type: typing.Dict[str, str]",
"'ratio=Y' for pair hedge # Clearing info self.account = \"\" # IB account",
"message.add(self.active_stop_time) # HEDGE orders message.add(self.hedge_type) if self.hedge_type: # pragma: no cover (I don't",
"(srv v18 and above) message.add(self.short_sale_slot) # # 0 for retail, 1 or 2",
"# Orders submitted to IB that remain in force for more than one",
"# type: float # Generic field to contain the stop price for STP",
"Canceled class Order(Serializable): def __init__(self, parent: ProtocolInterface) -> None: self._parent = parent self.instrument",
"= \"\" self.transmit = True self.parent_id = 0 self.block_order = False self.sweep_to_fill =",
"type: str self.equity_with_loan = None # type: str self.commission = None # type:",
"type: float self.randomize_price = False self.randomize_size = False # VOLATILITY ORDERS ONLY self.volatility",
"message.add(self.order_type) message.add(self.limit_price) message.add(self.aux_price) message.add(self.time_in_force) message.add(self.oca_group) message.add(self.account) message.add(self.open_close) message.add(self.origin) message.add(self.order_reference) message.add(self.transmit) message.add(self.parent_id) message.add(self.block_order) message.add(self.sweep_to_fill)",
"message.add(self.origin) message.add(self.order_reference) message.add(self.transmit) message.add(self.parent_id) message.add(self.block_order) message.add(self.sweep_to_fill) message.add(self.display_size) message.add(self.trigger_method) message.add(self.outside_regular_trading_hours) message.add(self.hidden) assert self.instrument.security_type !=",
"self.conditions_ignore_regular_trading_hours = False self.ext_operator = \"\" self.soft_dollar_tier_name = \"\" self.soft_dollar_tier_value = \"\" self.soft_dollar_tier_display_name",
"= 0 self.trigger_method = 0 self.outside_regular_trading_hours = False self.hidden = False self.good_after_time =",
"actual examples of these) message.add(self.hedge_param) message.add(self.opt_out_smart_routing) message.add(self.clearing_account) message.add(self.clearing_intent) message.add(self.not_held) message.add(bool(self.instrument.underlying_component)) if self.instrument.underlying_component: #",
"the third # quarter of 2011 will be canceled at the end of",
"LIMIT\" Volatility = 'VOL' class TimeInForce(str, enum.Enum): Day = \"DAY\" # Valid for",
"orders will be cancelled on the preceding Friday. # Orders that are modified",
"of these) message.add(self.hedge_param) message.add(self.opt_out_smart_routing) message.add(self.clearing_account) message.add(self.clearing_intent) message.add(self.not_held) message.add(bool(self.instrument.underlying_component)) if self.instrument.underlying_component: # pragma: no",
"# type: datetime.datetime self.active_stop_time = None # type: datetime.datetime self.oca_group = \"\" self.oca_type",
"have actual examples of these) message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset) message.add(self.scale_auto_reset) message.add(self.scale_init_position) message.add(self.scale_init_fill_quantity) message.add(self.scale_random_percent) message.add(self.scale_table)",
"will be cancelled on the preceding Friday. # Orders that are modified will",
"order will continue to work within the system and in the marketplace until",
"self.scale_random_percent = False self.scale_table = \"\" # # HEDGE ORDERS self.hedge_type = \"\"",
"self.max_commission = None # type: float self.commission_currency = None # type: str self.warning_text",
"message.add(self.oca_group) message.add(self.account) message.add(self.open_close) message.add(self.origin) message.add(self.order_reference) message.add(self.transmit) message.add(self.parent_id) message.add(self.block_order) message.add(self.sweep_to_fill) message.add(self.display_size) message.add(self.trigger_method) message.add(self.outside_regular_trading_hours) message.add(self.hidden)",
"if self.conditions: # pragma: no cover (Not implemented) raise UnsupportedFeature(\"Order conditions\") message.add(0, #",
"the market is canceled. ImmediateOrCancel = \"IOC\" # Good until Date. It will",
"self.nbbo_price_cap = None # type: float self.opt_out_smart_routing = False # BOX exchange orders",
"# type: float self.trailing_percent = None # type: float # financial advisors only",
"PeggedToStock = 'PEG STK' PeggedToBenchmark = 'PEG BENCH' Relative = 'REL' RelativeLimitCombo =",
"message.add(self.rule80a) message.add(self.settling_firm) message.add(self.all_or_none) message.add(self.min_quantity) message.add(self.percent_offset) message.add(self.etrade_only) message.add(self.firm_quote_only) message.add(self.nbbo_price_cap) message.add(self.auction_strategy) # AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT",
"pegged to stock and VOL orders only self.stock_range_lower = None # type: float",
"StopWithProtection = \"STP PRT\" TrailingStop = \"TRAIL\" TrailingStopLimit = \"TRAIL LIMIT\" Volatility =",
"= None # type: float self.client_id = None # type: int self.why_held =",
"no cover (I don't have actual examples of these) message.add(self.scale_price_adjust_value) message.add(self.scale_price_adjust_interval) message.add(self.scale_profit_offset) message.add(self.scale_auto_reset)",
"min_version=ProtocolVersion.MODELS_SUPPORT) # institutional short saleslot data (srv v18 and above) message.add(self.short_sale_slot) # #",
"= 'MTL' MarketWithProtection = 'MKT PRT' PassiveRelative = 'PASSV REL' PeggedToMidpoint = 'PEG",
"= \"\" # institutional (ie non-cleared) only self.open_close = \"O\" self.origin = OrderOrigin.Customer",
"type: int self.scale_subs_level_size = None # type: int self.scale_price_increment = None # type:",
"'PEG STK' PeggedToBenchmark = 'PEG BENCH' Relative = 'REL' RelativeLimitCombo = 'REL +",
"= None # type: str self.combo_legs_description = None # type: str self.inital_margin =",
"If the last day is a non-trading day, # the cancellation will occur",
"# type: float self.adjusted_stop_limit_price = 1.7976931348623157e+308 # type: float self.adjusted_trailing_amount = 1.7976931348623157e+308 #",
"= 1.7976931348623157e+308 # type: float self.adjusted_stop_price = 1.7976931348623157e+308 # type: float self.adjusted_stop_limit_price =",
"type: float self.perm_id = None # type: int self.parent_id = None # type:",
"for GTC orders. self.active_start_time = None # type: datetime.datetime self.active_stop_time = None #",
"specify zero. For # relative orders with no limit price, also specify zero.",
"# type: float self.average_fill_price = None # type: float self.perm_id = None #",
"self.randomize_price = False self.randomize_size = False # VOLATILITY ORDERS ONLY self.volatility = None",
"\"PTA\" (PostTrade) # ALGO ORDERS ONLY self.algo_strategy = \"\" self.algo_parameters = {} #",
"It will remain working within the system and in the marketplace until it",
"\"\" self.order_miscellaneous_options = {} # type: typing.Dict[str, str] self.reference_contract_id = 0 self.pegged_change_amount =",
"at the close of the final trading day of that quarter. For example,",
"At the end of the calendar quarter following the current quarter. For example,",
"type: VolatilityType self.delta_neutral_order_type = \"\" self.delta_neutral_aux_price = None # type: float self.delta_neutral_contract_id =",
"import enum import typing # noqa from ib_async.errors import UnsupportedFeature from ib_async.event import",
"int self.scale_init_fill_quantity = None # type: int self.scale_random_percent = False self.scale_table = \"\"",
"date consistent with the end of the calendar # quarter following the current",
"(LOO) self # If the entire Fill-or-Kill order does not execute as soon",
"type: float self.adjusted_stop_price = 1.7976931348623157e+308 # type: float self.adjusted_stop_limit_price = 1.7976931348623157e+308 # type:",
"# type: float self.mifid2_decision_maker = \"\" self.mifid2_decision_algo = \"\" self.mifid2_execution_trader = \"\" self.mifid2_execution_algo",
"self.trigger_price = 1.7976931348623157e+308 # type: float self.limit_price_offset = 1.7976931348623157e+308 # type: float self.adjusted_stop_price",
"than one day will not be reduced for dividends. To allow # adjustment",
"self.order_ref = None # type: str self.combo_legs_description = None # type: str self.inital_margin",
"# type: float self.override_percentage_constraints = False self.trail_stop_price = None # type: float self.trailing_percent",
"self.reference_change_amount = 0.0 self.reference_exchange_id = \"\" self.adjusted_order_type = OrderType.Undefined self.trigger_price = 1.7976931348623157e+308 #",
"UnsupportedFeature from ib_async.event import Event from ib_async import execution # noqa from ib_async.instrument",
"Day until Canceled class Order(Serializable): def __init__(self, parent: ProtocolInterface) -> None: self._parent =",
"message.add(self.hidden) assert self.instrument.security_type != 'BAG' # not supported message.add(\"\") # deprecated sharesAllocation field",
"type: float self.limit_price_offset = 1.7976931348623157e+308 # type: float self.adjusted_stop_price = 1.7976931348623157e+308 # type:",
"for beta hedge, 'ratio=Y' for pair hedge # Clearing info self.account = \"\"",
"-> None: self._parent = parent self.instrument = None # type: Instrument # Filled",
"self.scale_price_increment > 0.0: # pragma: no cover (I don't have actual examples of",
"= \"DTC\" # Day until Canceled class Order(Serializable): def __init__(self, parent: ProtocolInterface) ->",
"# type: int # EFP orders only # SCALE ORDERS ONLY self.scale_init_level_size =",
"executes # or is canceled. GTC orders will be automatically be cancelled under",
"message.add(self.settling_firm) message.add(self.all_or_none) message.add(self.min_quantity) message.add(self.percent_offset) message.add(self.etrade_only) message.add(self.firm_quote_only) message.add(self.nbbo_price_cap) message.add(self.auction_strategy) # AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT message.add(self.starting_price)",
"float self.mifid2_decision_maker = \"\" self.mifid2_decision_algo = \"\" self.mifid2_execution_trader = \"\" self.mifid2_execution_algo = \"\"",
"(ie non-cleared) only self.open_close = \"O\" self.origin = OrderOrigin.Customer self.short_sale_slot = 0 self.designated_location",
"type: float self.adjustable_trailing_unit = 0 self.conditions = [] # type: typing.List[None] # not",
"min_version=ProtocolVersion.PEGGED_TO_BENCHMARK) message.add(self.ext_operator, min_version=ProtocolVersion.EXT_OPERATOR) message.add(self.soft_dollar_tier_name, self.soft_dollar_tier_value, min_version=ProtocolVersion.SOFT_DOLLAR_TIER) message.add(self.cash_quantity, min_version=ProtocolVersion.CASH_QTY) message.add(self.mifid2_decision_maker, self.mifid2_decision_algo, min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader, self.mifid2_execution_algo,",
"self.scale_init_level_size = None # type: int self.scale_subs_level_size = None # type: int self.scale_price_increment",
"of the order self.clearing_intent = \"\" # \"\" (Default), \"IB\", \"Away\", \"PTA\" (PostTrade)",
"= None # type: str self.inital_margin = None # type: str self.maintenance_margin =",
"# type: float self.max_commission = None # type: float self.commission_currency = None #",
"orders only self.basis_points_type = None # type: int # EFP orders only #",
"or until the # close of the market on the date specified GoodTillDate",
"hedge, 'ratio=Y' for pair hedge # Clearing info self.account = \"\" # IB",
"cases specify zero. For # relative orders with no limit price, also specify",
"the marketplace until it executes or until the # close of the market",
"marketplace until it executes or until the # close of the market on",
"the two. GoodTillCancel = \"GTC\" # Immediate or Cancel. Any portion that is",
"last day is a non-trading day, # the cancellation will occur at the",
"specify zero. self.limit_price = None # type: float # Generic field to contain",
"# not suppored yet self.conditions_cancel_order = False self.conditions_ignore_regular_trading_hours = False self.ext_operator = \"\"",
"SCALE ORDERS ONLY self.scale_init_level_size = None # type: int self.scale_subs_level_size = None #",
"# type: str self.market_cap_price = None # type: float self.order_ref = None #",
"ib_async.instrument import Instrument # noqa from ib_async.protocol import ProtocolInterface, Serializable, ProtocolVersion, IncomingMessage, OutgoingMessage",
"None # type: int self.parent_id = None # type: int self.last_fill_price = None",
"False self.reference_price_type = None # type: int # 1=Average, 2 = BidOrAsk #",
"and in the marketplace until it executes # or is canceled. GTC orders",
"if self.hedge_type: # pragma: no cover (I don't have actual examples of these)",
"ib_async.errors import UnsupportedFeature from ib_async.event import Event from ib_async import execution # noqa",
"If you do not log into your IB account for 90 days. #",
"self.mifid2_decision_algo = \"\" self.mifid2_execution_trader = \"\" self.mifid2_execution_algo = \"\" updated = Event() #",
"\"\" # # HEDGE ORDERS self.hedge_type = \"\" # 'D' - delta, 'B'",
"market on the date specified GoodTillDate = \"GTD\" Opening = \"OPG\" # Use",
"only self.discretionary_amount = 0.0 self.etrade_only = True self.firm_quote_only = True self.nbbo_price_cap = None",
"message.add(self.scale_random_percent) message.add(self.scale_table) message.add(self.active_start_time) message.add(self.active_stop_time) # HEDGE orders message.add(self.hedge_type) if self.hedge_type: # pragma: no",
"message.add(self.algo_parameters) message.add(self.algo_id) message.add(self.what_if) message.add(\"\".join(\"%s=%s;\" % (k, v) for (k, v) in self.order_miscellaneous_options.items())) message.add(self.solicited)",
"# type: int # 1=Average, 2 = BidOrAsk # COMBO ORDERS ONLY self.basis_points",
"close of the final trading day of that quarter. For example, if the",
"= None # type: int self.parent_id = None # type: int self.last_fill_price =",
"self.mifid2_execution_algo = \"\" updated = Event() # type: Event[None] on_execution = Event() #",
"# pragma: no cover (I don't have actual examples of these) message.add(self.algo_parameters) message.add(self.algo_id)",
"PRT\" TrailingStop = \"TRAIL\" TrailingStopLimit = \"TRAIL LIMIT\" Volatility = 'VOL' class TimeInForce(str,",
"= \"\" self.delta_neutral_open_close = \"\" self.delta_neutral_short_sale = False self.delta_neutral_short_sale_slot = 0 self.delta_neutral_designated_location =",
"until it executes or until the # close of the market on the",
"these) message.add(self.delta_neutral_contract_id) message.add(self.delta_neutral_settling_firm) message.add(self.delta_neutral_clearing_account) message.add(self.delta_neutral_clearing_intent) message.add(self.delta_neutral_open_close) message.add(self.delta_neutral_short_sale) message.add(self.delta_neutral_short_sale_slot) message.add(self.delta_neutral_designated_location) message.add(self.continuous_update) message.add(self.reference_price_type) message.add(self.trail_stop_price) message.add(self.trailing_percent)",
"type: int self.scale_profit_offset = None # type: float self.scale_auto_reset = False self.scale_init_position =",
"= 2 Transparent = 3 class Action(str, enum.Enum): Buy = 'BUY' Sell =",
"\"IOC\" # Good until Date. It will remain working within the system and",
"the preceding Friday. # Orders that are modified will be assigned a new",
"Friday. # Orders that are modified will be assigned a new “Auto Expire”",
"= 0 Firm = 1 Unknown = 2 class AuctionStrategy(enum.Enum): Unset = 0",
"with no limit price, also specify zero. self.limit_price = None # type: float",
"orders (srv v26 and above) message.add(self.volatility) message.add(self.volatility_type) message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price) if self.delta_neutral_order_type: # pragma:",
"# the cancellation will occur at the close of the final trading day",
"self.scale_price_adjust_value = None # type: float self.scale_price_adjust_interval = None # type: int self.scale_profit_offset",
"26 && isVolOrder) ? DBL_MAX : selfstockRangeUpper; message.add(self.stock_range_lower) message.add(self.stock_range_upper) message.add(self.override_percentage_constraints) # Volatility orders",
"(protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeUpper; message.add(self.stock_range_lower) message.add(self.stock_range_upper) message.add(self.override_percentage_constraints) #",
"type: float # Generic field to contain the stop price for STP LMT",
"Undefined = \"\" BoxTop = 'BOX TOP' Limit = 'LMT' LimitIfTouched = 'LIT'",
"message.add(self.auction_strategy) # AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT message.add(self.starting_price) message.add(self.stock_ref_price) message.add(self.delta) # Volatility orders had specific",
"MarketToLimit = 'MTL' MarketWithProtection = 'MKT PRT' PassiveRelative = 'PASSV REL' PeggedToMidpoint =",
"not log into your IB account for 90 days. # At the end",
"None # type: float self.scale_price_adjust_interval = None # type: int self.scale_profit_offset = None",
"ProtocolInterface) -> None: self._parent = parent self.instrument = None # type: Instrument #",
"self.fa_percentage = \"\" # institutional (ie non-cleared) only self.open_close = \"O\" self.origin =",
"float self.commission_currency = None # type: str self.warning_text = None # type: str",
"For example, an order placed during the third # quarter of 2011 will",
"message.add(self.transmit) message.add(self.parent_id) message.add(self.block_order) message.add(self.sweep_to_fill) message.add(self.display_size) message.add(self.trigger_method) message.add(self.outside_regular_trading_hours) message.add(self.hidden) assert self.instrument.security_type != 'BAG' #",
"1 Unknown = 2 class AuctionStrategy(enum.Enum): Unset = 0 Match = 1 Improvement",
"submitted to IB that remain in force for more than one day will",
"= BidOrAsk # COMBO ORDERS ONLY self.basis_points = None # type: float #",
"= None # type: int self.scale_profit_offset = None # type: float self.scale_auto_reset =",
"have actual examples of these) message.add(self.instrument.underlying_component) message.add(self.algo_strategy) if self.algo_strategy: # pragma: no cover",
"= 0 self.order_reference = \"\" self.transmit = True self.parent_id = 0 self.block_order =",
"type: float self.client_id = None # type: int self.why_held = None # type:",
"= {} # type: typing.Dict[str, str] self.reference_contract_id = 0 self.pegged_change_amount = 0.0 self.is_pegged_change_amount_decrease",
"!= 'BAG' # not supported message.add(\"\") # deprecated sharesAllocation field message.add(self.discretionary_amount) message.add(self.good_after_time) message.add(self.good_till_date)",
"= \"\" self.mifid2_decision_algo = \"\" self.mifid2_execution_trader = \"\" self.mifid2_execution_algo = \"\" updated =",
"enum.Enum): Undefined = \"\" BoxTop = 'BOX TOP' Limit = 'LMT' LimitIfTouched =",
"= 'REL + MKT' Stop = \"STP\" StopLimit = \"STP LMT\" StopWithProtection =",
"= None message.add(security_id_type) message.add(security_id) message.add(self.action) message.add(self.total_quantity) message.add(self.order_type) message.add(self.limit_price) message.add(self.aux_price) message.add(self.time_in_force) message.add(self.oca_group) message.add(self.account) message.add(self.open_close)",
"message.add(self.algo_id) message.add(self.what_if) message.add(\"\".join(\"%s=%s;\" % (k, v) for (k, v) in self.order_miscellaneous_options.items())) message.add(self.solicited) message.add(self.randomize_size)",
"don't have actual examples of these) message.add(self.hedge_param) message.add(self.opt_out_smart_routing) message.add(self.clearing_account) message.add(self.clearing_intent) message.add(self.not_held) message.add(bool(self.instrument.underlying_component)) if",
"for the day only. # Good until canceled. The order will continue to",
"GoodTillCancel = \"GTC\" # Immediate or Cancel. Any portion that is not filled",
"VOL orders only self.stock_range_lower = None # type: float self.stock_range_upper = None #",
"lower = (protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeLower; # double",
"order self.clearing_intent = \"\" # \"\" (Default), \"IB\", \"Away\", \"PTA\" (PostTrade) # ALGO",
"message.add(self.stock_range_upper) message.add(self.override_percentage_constraints) # Volatility orders (srv v26 and above) message.add(self.volatility) message.add(self.volatility_type) message.add(self.delta_neutral_order_type) message.add(self.delta_neutral_aux_price)",
"= (protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeUpper; message.add(self.stock_range_lower) message.add(self.stock_range_upper) message.add(self.override_percentage_constraints)",
"\"TRAIL\" TrailingStopLimit = \"TRAIL LIMIT\" Volatility = 'VOL' class TimeInForce(str, enum.Enum): Day =",
"# extended order fields self.time_in_force = TimeInForce.GoodTillCancel # for GTC orders. self.active_start_time =",
"message.add(self.active_start_time) message.add(self.active_stop_time) # HEDGE orders message.add(self.hedge_type) if self.hedge_type: # pragma: no cover (I",
"# type: float self.commission_currency = None # type: str self.warning_text = None #",
"be cancelled under the following conditions: If a corporate # action on a",
"self.algo_parameters = {} # type: typing.Dict[str, str] self.smart_combo_routing_params = {} # type: typing.Dict[str,",
"type: int self.scale_random_percent = False self.scale_table = \"\" # # HEDGE ORDERS self.hedge_type",
"in self.order_miscellaneous_options.items())) message.add(self.solicited) message.add(self.randomize_size) message.add(self.randomize_price) if self.order_type == \"PEG BENCH\": # pragma: no",
"None # type: str self.combo_legs_description = None # type: str self.inital_margin = None",
"\"STP LMT\" StopWithProtection = \"STP PRT\" TrailingStop = \"TRAIL\" TrailingStopLimit = \"TRAIL LIMIT\"",
"by status messages self.status = None # type: str self.filled = None #",
"- beta, 'F' - FX, 'P' - pair self.hedge_param = \"\" # 'beta=X'",
"0 Match = 1 Improvement = 2 Transparent = 3 class Action(str, enum.Enum):",
"ONLY self.scale_init_level_size = None # type: int self.scale_subs_level_size = None # type: int",
"'F' - FX, 'P' - pair self.hedge_param = \"\" # 'beta=X' value for",
"Event[None] on_execution = Event() # type: Event[execution.Execution] def serialize(self, message: OutgoingMessage): message.add(self.order_id) message.add(self.instrument)",
"security_id_type, security_id = next(iter(self.instrument.security_ids.items())) else: security_id_type = security_id = None message.add(security_id_type) message.add(security_id) message.add(self.action)",
"example, an order placed during the third # quarter of 2011 will be",
"self.trailing_percent = None # type: float # financial advisors only self.fa_group = \"\"",
"force for more than one day will not be reduced for dividends. To",
"type: str self.order_id = 0 self.client_id = 0 self.perm_id = 0 # main",
"None # type: Instrument # Filled by status messages self.status = None #",
"will be assigned a new “Auto Expire” date consistent with the end of",
"min_version=ProtocolVersion.DECISION_MAKER) message.add(self.mifid2_execution_trader, self.mifid2_execution_algo, min_version=ProtocolVersion.MIFID_EXECUTION) def deserialize(self, message: IncomingMessage): assert False, \"Implemented in message",
"self.remaining = None # type: float self.average_fill_price = None # type: float self.perm_id",
"= None # type: float self.trailing_percent = None # type: float # financial",
"ORDERS ONLY self.basis_points = None # type: float # EFP orders only self.basis_points_type",
"double lower = (protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeLower; #",
"= False # Not Held self.not_held = False self.solicited = False self.model_code =",
"\"GTC\" # Immediate or Cancel. Any portion that is not filled as soon",
"message.add(self.instrument.underlying_component) message.add(self.algo_strategy) if self.algo_strategy: # pragma: no cover (I don't have actual examples",
"message.add(self.trail_stop_price) message.add(self.trailing_percent) # SCALE orders message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size) message.add(self.scale_price_increment) if self.scale_price_increment and self.scale_price_increment >",
"# type: float # extended order fields self.time_in_force = TimeInForce.GoodTillCancel # for GTC",
"Event() # type: Event[execution.Execution] def serialize(self, message: OutgoingMessage): message.add(self.order_id) message.add(self.instrument) if self.instrument.security_ids: security_id_type,",
"self.algo_strategy = \"\" self.algo_parameters = {} # type: typing.Dict[str, str] self.smart_combo_routing_params = {}",
"Market = 'MKT' MarketIfTouched = 'MIT' MarketOnClose = 'MOC' MarketToLimit = 'MTL' MarketWithProtection",
"your order price on ex-dividend date, consider using a Good-Til-Date/Time (GTD) or #",
"# HEDGE orders message.add(self.hedge_type) if self.hedge_type: # pragma: no cover (I don't have",
"= \"TRAIL LIMIT\" Volatility = 'VOL' class TimeInForce(str, enum.Enum): Day = \"DAY\" #",
"False # BOX exchange orders only self.auction_strategy = AuctionStrategy.Unset self.starting_price = None #",
"float self.stock_ref_price = None # type: float self.delta = None # type: float",
"# SCALE orders message.add(self.scale_init_level_size) message.add(self.scale_subs_level_size) message.add(self.scale_price_increment) if self.scale_price_increment and self.scale_price_increment > 0.0: #",
"you do not log into your IB account for 90 days. # At",
"int self.scale_random_percent = False self.scale_table = \"\" # # HEDGE ORDERS self.hedge_type =",
"Immediate or Cancel. Any portion that is not filled as soon as it",
"message.add(self.order_reference) message.add(self.transmit) message.add(self.parent_id) message.add(self.block_order) message.add(self.sweep_to_fill) message.add(self.display_size) message.add(self.trigger_method) message.add(self.outside_regular_trading_hours) message.add(self.hidden) assert self.instrument.security_type != 'BAG'",
"no cover (Not implemented) raise UnsupportedFeature(\"Order conditions\") message.add(0, # no conditions self.adjusted_order_type, self.trigger_price,",
"self.smart_combo_routing_params = {} # type: typing.Dict[str, str] self.algo_id = \"\" # What-if self.what_if",
"Orders that are modified will be assigned a new “Auto Expire” date consistent",
"= None # type: float self.scale_price_adjust_value = None # type: float self.scale_price_adjust_interval =",
"# type: float self.delta = None # type: float # pegged to stock",
"OutgoingMessage class OrderOrigin(enum.Enum): Customer = 0 Firm = 1 Unknown = 2 class",
"# COMBO ORDERS ONLY self.basis_points = None # type: float # EFP orders",
"None # type: int self.percent_offset = None # type: float self.override_percentage_constraints = False",
"float self.adjusted_trailing_amount = 1.7976931348623157e+308 # type: float self.adjustable_trailing_unit = 0 self.conditions = []",
"# native cash quantity self.cash_quantity = 1.7976931348623157e+308 # type: float self.mifid2_decision_maker = \"\"",
"assert self.instrument.security_type != 'BAG' # not supported message.add(\"\") # deprecated sharesAllocation field message.add(self.discretionary_amount)",
"DBL_MAX : selfstockRangeUpper; message.add(self.stock_range_lower) message.add(self.stock_range_upper) message.add(self.override_percentage_constraints) # Volatility orders (srv v26 and above)",
"self.cash_quantity = 1.7976931348623157e+308 # type: float self.mifid2_decision_maker = \"\" self.mifid2_decision_algo = \"\" self.mifid2_execution_trader",
"None # type: float self.scale_price_adjust_value = None # type: float self.scale_price_adjust_interval = None",
"# populate only when shortSaleSlot = 2. message.add(self.exempt_code) message.add(self.oca_type) message.add(self.rule80a) message.add(self.settling_firm) message.add(self.all_or_none) message.add(self.min_quantity)",
"self.delta_neutral_short_sale_slot = 0 self.delta_neutral_designated_location = \"\" self.continuous_update = False self.reference_price_type = None #",
"= 'LMT' LimitIfTouched = 'LIT' LimitOnClose = 'LOC' Market = 'MKT' MarketIfTouched =",
"cancellation will occur at the close of the final trading day of that",
"= \"\" self.algo_parameters = {} # type: typing.Dict[str, str] self.smart_combo_routing_params = {} #",
"+ MKT' Stop = \"STP\" StopLimit = \"STP LMT\" StopWithProtection = \"STP PRT\"",
"no cover (I don't have actual examples of these) message.add(self.hedge_param) message.add(self.opt_out_smart_routing) message.add(self.clearing_account) message.add(self.clearing_intent)",
"# type: str self.inital_margin = None # type: str self.maintenance_margin = None #",
"float self.min_commission = None # type: float self.max_commission = None # type: float",
"= False self.model_code = \"\" self.order_miscellaneous_options = {} # type: typing.Dict[str, str] self.reference_contract_id",
"None # type: float # pegged to stock and VOL orders only self.stock_range_lower",
"= None # type: int self.scale_subs_level_size = None # type: int self.scale_price_increment =",
"message.add(self.fa_method) message.add(self.fa_percentage) message.add(self.fa_profile) message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT) # institutional short saleslot data (srv v18 and",
"# quarter following the current quarter. # Orders submitted to IB that remain",
"self.conditions = [] # type: typing.List[None] # not suppored yet self.conditions_cancel_order = False",
"# Valid for the day only. # Good until canceled. The order will",
"PeggedToMarket = 'PEG MKT' PeggedToStock = 'PEG STK' PeggedToBenchmark = 'PEG BENCH' Relative",
"a non-trading day, # the cancellation will occur at the close of the",
"# type: float # financial advisors only self.fa_group = \"\" self.fa_profile = \"\"",
"security_id_type = security_id = None message.add(security_id_type) message.add(security_id) message.add(self.action) message.add(self.total_quantity) message.add(self.order_type) message.add(self.limit_price) message.add(self.aux_price) message.add(self.time_in_force)"
] |
[
"\" + invitados[i].title() + \" te invito a una cena esta noche en",
"invitado \" + invitados[1].title() + \" no asistira a la cena\" print(message1) invitados[1]=\"<NAME>\"",
"\" + invitados[1].title() + \" no asistira a la cena\" print(message1) invitados[1]=\"<NAME>\" for",
"invitados[i].title() + \" te invito a una cena esta noche en el restaurante",
"+ \" te invito a una cena esta noche en el restaurante Meson",
"cena\" print(message1) invitados[1]=\"<NAME>\" for i in range(len(invitados)): message=\"Hola como estas \" + invitados[i].title()",
"range(len(invitados)): message=\"Hola como estas \" + invitados[i].title() + \" te invito a una",
"message1=\"El invitado \" + invitados[1].title() + \" no asistira a la cena\" print(message1)",
"invitados=['<NAME>','<NAME>','<NAME>','<NAME>','<NAME>'] message1=\"El invitado \" + invitados[1].title() + \" no asistira a la cena\"",
"la cena\" print(message1) invitados[1]=\"<NAME>\" for i in range(len(invitados)): message=\"Hola como estas \" +",
"\" no asistira a la cena\" print(message1) invitados[1]=\"<NAME>\" for i in range(len(invitados)): message=\"Hola",
"message=\"Hola como estas \" + invitados[i].title() + \" te invito a una cena",
"<filename>Ago-Dic-2019/Ricardo_Romero_Medina/Practica1/Practica_3-5.py invitados=['<NAME>','<NAME>','<NAME>','<NAME>','<NAME>'] message1=\"El invitado \" + invitados[1].title() + \" no asistira a la",
"estas \" + invitados[i].title() + \" te invito a una cena esta noche",
"in range(len(invitados)): message=\"Hola como estas \" + invitados[i].title() + \" te invito a",
"+ invitados[1].title() + \" no asistira a la cena\" print(message1) invitados[1]=\"<NAME>\" for i",
"te invito a una cena esta noche en el restaurante Meson Principal.\" print(message)",
"+ invitados[i].title() + \" te invito a una cena esta noche en el",
"invitados[1]=\"<NAME>\" for i in range(len(invitados)): message=\"Hola como estas \" + invitados[i].title() + \"",
"no asistira a la cena\" print(message1) invitados[1]=\"<NAME>\" for i in range(len(invitados)): message=\"Hola como",
"\" te invito a una cena esta noche en el restaurante Meson Principal.\"",
"+ \" no asistira a la cena\" print(message1) invitados[1]=\"<NAME>\" for i in range(len(invitados)):",
"asistira a la cena\" print(message1) invitados[1]=\"<NAME>\" for i in range(len(invitados)): message=\"Hola como estas",
"i in range(len(invitados)): message=\"Hola como estas \" + invitados[i].title() + \" te invito",
"a la cena\" print(message1) invitados[1]=\"<NAME>\" for i in range(len(invitados)): message=\"Hola como estas \"",
"print(message1) invitados[1]=\"<NAME>\" for i in range(len(invitados)): message=\"Hola como estas \" + invitados[i].title() +",
"for i in range(len(invitados)): message=\"Hola como estas \" + invitados[i].title() + \" te",
"invitados[1].title() + \" no asistira a la cena\" print(message1) invitados[1]=\"<NAME>\" for i in",
"como estas \" + invitados[i].title() + \" te invito a una cena esta"
] |
[
"Created on: 2019/12/31 22:01 @File: models.py @Author:<NAME> (<EMAIL> & <EMAIL>) @Copy Right: Copyright",
"of the IEEE Conference on Computer Vision and Pattern # Recognition, 2017: pp.",
"Networks, # in: International Conference on Machine Learning, 2019. def efficientnetb5_model(input_shape=(None, None), num_classes=2):",
"= Model(inputs=base_model.input, outputs=predict) return model # EfficientNet-B5 # <NAME>, <NAME>, EfficientNet: Rethinking Model",
"from keras.applications.nasnet import NASNetMobile from keras.applications.mobilenet_v2 import MobileNetV2 from libs.keras_efficientnets.efficientnet import EfficientNetB5 warnings.filterwarnings(\"ignore\")",
"with depthwise separable convolutions, # in: Proceedings of the IEEE Conference on Computer",
"x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = Xception(include_top=False, input_tensor=input_fakeRgb) output =",
"<NAME>, <NAME>, Learning transferable architectures for scalable image recognition, # in: Proceedings of",
"= Model(inputs=base_model.input, outputs=predict) return model # Xception # F. Chollet, Xception: Deep learning",
"IEEE Conference on Computer Vision and Pattern # Recognition, 2017: pp. 1251–1258. def",
"#if __name__ == '__main__': #model = resnet50_model((64, 64), 2) #model = xception_model((64, 64),",
"x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = ResNet50(include_top=False, input_tensor=input_fakeRgb) output =",
"3), 3, 3))(input_gray) base_model = ResNet50(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes,",
"and Pattern # Recognition, 2018: pp. 4510–4520. def mobilenetv2_model(input_shape=(None, None), num_classes=2): input_gray =",
"the IEEE Conference on Computer Vision and Pattern # Recognition, 2018: pp. 8697–8710.",
"<NAME>, <NAME>, Mobilenetv2: Inverted residuals and linear bottlenecks, # in: Proceedings of the",
"num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3),",
"K.expand_dims( x, 3), 3, 3))(input_gray) base_model = NASNetMobile(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict",
"3), 3, 3))(input_gray) base_model = MobileNetV2(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes,",
"= Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # Xception # F.",
"EfficientNetB5 warnings.filterwarnings(\"ignore\") # Resnet50 # <NAME>, <NAME>, <NAME>, <NAME>, Deep Residual Learning for",
"in: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), IEEE, Las",
"predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # NASNetMobile #",
"base_model = EfficientNetB5(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model =",
"= xception_model((64, 64), 2) #model = nasnetmobile_model((64, 64), 2) #model = mobilenetv2_model((64, 64),",
"functions #if __name__ == '__main__': #model = resnet50_model((64, 64), 2) #model = xception_model((64,",
"Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # EfficientNet-B5 # <NAME>, <NAME>,",
"#model = resnet50_model((64, 64), 2) #model = xception_model((64, 64), 2) #model = nasnetmobile_model((64,",
"as K from keras.layers import Input, Lambda, GlobalAvgPool2D, Dense from keras.models import Model",
"Kears-2.2.4 \"\"\" import warnings import keras.backend as K from keras.layers import Input, Lambda,",
"input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return",
"= Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = MobileNetV2(include_top=False,",
"Chollet, Xception: Deep learning with depthwise separable convolutions, # in: Proceedings of the",
"(CVPR), IEEE, Las Vegas, NV, USA, 2016: pp. 770–778. # https://doi.org/10.1109/CVPR.2016.90. def resnet50_model(input_shape=(None,",
"Recognition, 2018: pp. 4510–4520. def mobilenetv2_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb =",
"= GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model #",
"2) #model = nasnetmobile_model((64, 64), 2) #model = mobilenetv2_model((64, 64), 2) #model =",
"and Pattern Recognition (CVPR), IEEE, Las Vegas, NV, USA, 2016: pp. 770–778. #",
"Conference on Machine Learning, 2019. def efficientnetb5_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb",
"base_model = MobileNetV2(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model =",
"architectures for scalable image recognition, # in: Proceedings of the IEEE Conference on",
"Inverted residuals and linear bottlenecks, # in: Proceedings of the IEEE Conference on",
"EfficientNet-B5 # <NAME>, <NAME>, EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks, #",
"# EfficientNet-B5 # <NAME>, <NAME>, EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,",
"model # MobileNetV2 # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Mobilenetv2: Inverted residuals and",
"Python-3.7.4, TensorFlow-1.4, Kears-2.2.4 \"\"\" import warnings import keras.backend as K from keras.layers import",
"Learning transferable architectures for scalable image recognition, # in: Proceedings of the IEEE",
"64), 2) #model = nasnetmobile_model((64, 64), 2) #model = mobilenetv2_model((64, 64), 2) #model",
"Vision and Pattern Recognition (CVPR), IEEE, Las Vegas, NV, USA, 2016: pp. 770–778.",
"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks, # in: International Conference on",
"= Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # EfficientNet-B5 # <NAME>,",
"Convolutional Neural Network (DTCNN) Created on: 2019/12/31 22:01 @File: models.py @Author:<NAME> (<EMAIL> &",
"GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # MobileNetV2",
"Network (DTCNN) Created on: 2019/12/31 22:01 @File: models.py @Author:<NAME> (<EMAIL> & <EMAIL>) @Copy",
"pp. 770–778. # https://doi.org/10.1109/CVPR.2016.90. def resnet50_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb =",
"None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements( K.expand_dims( x,",
"https://doi.org/10.1109/CVPR.2016.90. def resnet50_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x:",
"Recognition, # in: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),",
"Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # MobileNetV2 # <NAME>, <NAME>,",
"# Resnet50 # <NAME>, <NAME>, <NAME>, <NAME>, Deep Residual Learning for Image Recognition,",
"# in: Proceedings of the IEEE Conference on Computer Vision and Pattern #",
"Neural Networks, # in: International Conference on Machine Learning, 2019. def efficientnetb5_model(input_shape=(None, None),",
"on Computer Vision and Pattern Recognition (CVPR), IEEE, Las Vegas, NV, USA, 2016:",
"@File: models.py @Author:<NAME> (<EMAIL> & <EMAIL>) @Copy Right: Copyright © 2019-2020 HUST. All",
"Model(inputs=base_model.input, outputs=predict) return model # Xception # F. Chollet, Xception: Deep learning with",
"NV, USA, 2016: pp. 770–778. # https://doi.org/10.1109/CVPR.2016.90. def resnet50_model(input_shape=(None, None), num_classes=2): input_gray =",
"on: 2019/12/31 22:01 @File: models.py @Author:<NAME> (<EMAIL> & <EMAIL>) @Copy Right: Copyright ©",
"Vision and Pattern # Recognition, 2017: pp. 1251–1258. def xception_model(input_shape=(None, None), num_classes=2): input_gray",
"Machine Learning, 2019. def efficientnetb5_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda(",
"Reserved. @Requirement: Python-3.7.4, TensorFlow-1.4, Kears-2.2.4 \"\"\" import warnings import keras.backend as K from",
"outputs=predict) return model # Xception # F. Chollet, Xception: Deep learning with depthwise",
"3))(input_gray) base_model = NASNetMobile(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model",
"3, 3))(input_gray) base_model = ResNet50(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output)",
"Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = Xception(include_top=False, input_tensor=input_fakeRgb)",
"MobileNetV2(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict)",
"770–778. # https://doi.org/10.1109/CVPR.2016.90. def resnet50_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda(",
"scalable image recognition, # in: Proceedings of the IEEE Conference on Computer Vision",
"Las Vegas, NV, USA, 2016: pp. 770–778. # https://doi.org/10.1109/CVPR.2016.90. def resnet50_model(input_shape=(None, None), num_classes=2):",
"K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = MobileNetV2(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output)",
"depthwise separable convolutions, # in: Proceedings of the IEEE Conference on Computer Vision",
"2018: pp. 8697–8710. def nasnetmobile_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda(",
"warnings import keras.backend as K from keras.layers import Input, Lambda, GlobalAvgPool2D, Dense from",
"activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # NASNetMobile # [1]<NAME>, <NAME>, <NAME>,",
"International Conference on Machine Learning, 2019. def efficientnetb5_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape)",
"keras.applications.nasnet import NASNetMobile from keras.applications.mobilenet_v2 import MobileNetV2 from libs.keras_efficientnets.efficientnet import EfficientNetB5 warnings.filterwarnings(\"ignore\") #",
"Deep Residual Learning for Image Recognition, # in: 2016 IEEE Conference on Computer",
"activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # MobileNetV2 # <NAME>, <NAME>, <NAME>,",
"<NAME>, <NAME>, <NAME>, Deep Residual Learning for Image Recognition, # in: 2016 IEEE",
"for Convolutional Neural Networks, # in: International Conference on Machine Learning, 2019. def",
"GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # NASNetMobile",
"lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = ResNet50(include_top=False, input_tensor=input_fakeRgb) output",
"Mobilenetv2: Inverted residuals and linear bottlenecks, # in: Proceedings of the IEEE Conference",
"predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # Xception #",
"on Computer Vision and Pattern # Recognition, 2018: pp. 4510–4520. def mobilenetv2_model(input_shape=(None, None),",
"Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = MobileNetV2(include_top=False, input_tensor=input_fakeRgb)",
"models.py @Author:<NAME> (<EMAIL> & <EMAIL>) @Copy Right: Copyright © 2019-2020 HUST. All Rights",
"# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Mobilenetv2: Inverted residuals and linear bottlenecks, #",
"3), 3, 3))(input_gray) base_model = EfficientNetB5(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes,",
"= Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray)",
"keras.applications.mobilenet_v2 import MobileNetV2 from libs.keras_efficientnets.efficientnet import EfficientNetB5 warnings.filterwarnings(\"ignore\") # Resnet50 # <NAME>, <NAME>,",
"Learning for Image Recognition, # in: 2016 IEEE Conference on Computer Vision and",
"x, 3), 3, 3))(input_gray) base_model = MobileNetV2(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict =",
"nasnetmobile_model((64, 64), 2) #model = mobilenetv2_model((64, 64), 2) #model = efficientnetb5_model((64, 64), 2)",
"2019/12/31 22:01 @File: models.py @Author:<NAME> (<EMAIL> & <EMAIL>) @Copy Right: Copyright © 2019-2020",
"import EfficientNetB5 warnings.filterwarnings(\"ignore\") # Resnet50 # <NAME>, <NAME>, <NAME>, <NAME>, Deep Residual Learning",
"ResNet50(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict)",
"in: Proceedings of the IEEE Conference on Computer Vision and Pattern # Recognition,",
"<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Mobilenetv2: Inverted residuals and linear bottlenecks, # in:",
"NASNetMobile # [1]<NAME>, <NAME>, <NAME>, <NAME>, Learning transferable architectures for scalable image recognition,",
"# Recognition, 2017: pp. 1251–1258. def xception_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb",
"# in: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), IEEE,",
"separable convolutions, # in: Proceedings of the IEEE Conference on Computer Vision and",
"<NAME>, <NAME>, <NAME>, Learning transferable architectures for scalable image recognition, # in: Proceedings",
"# Recognition, 2018: pp. 4510–4520. def mobilenetv2_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb",
"import Input, Lambda, GlobalAvgPool2D, Dense from keras.models import Model from keras.applications.resnet50 import ResNet50",
"3, 3))(input_gray) base_model = EfficientNetB5(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output)",
"4510–4520. def mobilenetv2_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x:",
"Vision and Pattern # Recognition, 2018: pp. 8697–8710. def nasnetmobile_model(input_shape=(None, None), num_classes=2): input_gray",
"def efficientnetb5_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements(",
"bottlenecks, # in: Proceedings of the IEEE Conference on Computer Vision and Pattern",
"model # EfficientNet-B5 # <NAME>, <NAME>, EfficientNet: Rethinking Model Scaling for Convolutional Neural",
"xception_model((64, 64), 2) #model = nasnetmobile_model((64, 64), 2) #model = mobilenetv2_model((64, 64), 2)",
"K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = ResNet50(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output)",
"the IEEE Conference on Computer Vision and Pattern # Recognition, 2017: pp. 1251–1258.",
"3), 3, 3))(input_gray) base_model = Xception(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes,",
"TensorFlow-1.4, Kears-2.2.4 \"\"\" import warnings import keras.backend as K from keras.layers import Input,",
"#Test DTCNN functions #if __name__ == '__main__': #model = resnet50_model((64, 64), 2) #model",
"= Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # MobileNetV2 # <NAME>,",
"USA, 2016: pp. 770–778. # https://doi.org/10.1109/CVPR.2016.90. def resnet50_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape)",
"K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = Xception(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output)",
"(DTCNN) Created on: 2019/12/31 22:01 @File: models.py @Author:<NAME> (<EMAIL> & <EMAIL>) @Copy Right:",
"Resnet50 # <NAME>, <NAME>, <NAME>, <NAME>, Deep Residual Learning for Image Recognition, #",
"= Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = Xception(include_top=False,",
"<NAME>, Deep Residual Learning for Image Recognition, # in: 2016 IEEE Conference on",
"model # Xception # F. Chollet, Xception: Deep learning with depthwise separable convolutions,",
"= Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = NASNetMobile(include_top=False,",
"image recognition, # in: Proceedings of the IEEE Conference on Computer Vision and",
"x, 3), 3, 3))(input_gray) base_model = ResNet50(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict =",
"lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = EfficientNetB5(include_top=False, input_tensor=input_fakeRgb) output",
"def mobilenetv2_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements(",
"MobileNetV2 from libs.keras_efficientnets.efficientnet import EfficientNetB5 warnings.filterwarnings(\"ignore\") # Resnet50 # <NAME>, <NAME>, <NAME>, <NAME>,",
"def xception_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements(",
"2016: pp. 770–778. # https://doi.org/10.1109/CVPR.2016.90. def resnet50_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb",
"x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = EfficientNetB5(include_top=False, input_tensor=input_fakeRgb) output =",
"#model = nasnetmobile_model((64, 64), 2) #model = mobilenetv2_model((64, 64), 2) #model = efficientnetb5_model((64,",
"mobilenetv2_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements( K.expand_dims(",
"Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model #Test DTCNN functions #if __name__",
"from libs.keras_efficientnets.efficientnet import EfficientNetB5 warnings.filterwarnings(\"ignore\") # Resnet50 # <NAME>, <NAME>, <NAME>, <NAME>, Deep",
"'__main__': #model = resnet50_model((64, 64), 2) #model = xception_model((64, 64), 2) #model =",
"Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = NASNetMobile(include_top=False, input_tensor=input_fakeRgb)",
"K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = EfficientNetB5(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output)",
"model = Model(inputs=base_model.input, outputs=predict) return model # Xception # F. Chollet, Xception: Deep",
"convolutions, # in: Proceedings of the IEEE Conference on Computer Vision and Pattern",
"lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = MobileNetV2(include_top=False, input_tensor=input_fakeRgb) output",
"return model # Xception # F. Chollet, Xception: Deep learning with depthwise separable",
"import MobileNetV2 from libs.keras_efficientnets.efficientnet import EfficientNetB5 warnings.filterwarnings(\"ignore\") # Resnet50 # <NAME>, <NAME>, <NAME>,",
"Dense from keras.models import Model from keras.applications.resnet50 import ResNet50 from keras.applications.xception import Xception",
"Model Scaling for Convolutional Neural Networks, # in: International Conference on Machine Learning,",
"Computer Vision and Pattern Recognition (CVPR), IEEE, Las Vegas, NV, USA, 2016: pp.",
"NASNetMobile(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict)",
"Proceedings of the IEEE Conference on Computer Vision and Pattern # Recognition, 2018:",
"<NAME>, EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks, # in: International Conference",
"def nasnetmobile_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements(",
"3))(input_gray) base_model = Xception(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model",
"Model(inputs=base_model.input, outputs=predict) return model # NASNetMobile # [1]<NAME>, <NAME>, <NAME>, <NAME>, Learning transferable",
"from keras.models import Model from keras.applications.resnet50 import ResNet50 from keras.applications.xception import Xception from",
"activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model #Test DTCNN functions #if __name__ ==",
"2017: pp. 1251–1258. def xception_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda(",
"DTCNN functions #if __name__ == '__main__': #model = resnet50_model((64, 64), 2) #model =",
"NASNetMobile from keras.applications.mobilenet_v2 import MobileNetV2 from libs.keras_efficientnets.efficientnet import EfficientNetB5 warnings.filterwarnings(\"ignore\") # Resnet50 #",
"= Model(inputs=base_model.input, outputs=predict) return model # NASNetMobile # [1]<NAME>, <NAME>, <NAME>, <NAME>, Learning",
"keras.applications.resnet50 import ResNet50 from keras.applications.xception import Xception from keras.applications.nasnet import NASNetMobile from keras.applications.mobilenet_v2",
"& <EMAIL>) @Copy Right: Copyright © 2019-2020 HUST. All Rights Reserved. @Requirement: Python-3.7.4,",
"Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = ResNet50(include_top=False, input_tensor=input_fakeRgb)",
"Recognition (CVPR), IEEE, Las Vegas, NV, USA, 2016: pp. 770–778. # https://doi.org/10.1109/CVPR.2016.90. def",
"<NAME>, Mobilenetv2: Inverted residuals and linear bottlenecks, # in: Proceedings of the IEEE",
"Copyright © 2019-2020 HUST. All Rights Reserved. @Requirement: Python-3.7.4, TensorFlow-1.4, Kears-2.2.4 \"\"\" import",
"[1]<NAME>, <NAME>, <NAME>, <NAME>, Learning transferable architectures for scalable image recognition, # in:",
"on Computer Vision and Pattern # Recognition, 2018: pp. 8697–8710. def nasnetmobile_model(input_shape=(None, None),",
"on Computer Vision and Pattern # Recognition, 2017: pp. 1251–1258. def xception_model(input_shape=(None, None),",
"F. Chollet, Xception: Deep learning with depthwise separable convolutions, # in: Proceedings of",
"Conference on Computer Vision and Pattern # Recognition, 2018: pp. 8697–8710. def nasnetmobile_model(input_shape=(None,",
"Model(inputs=base_model.input, outputs=predict) return model # MobileNetV2 # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Mobilenetv2:",
"output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model",
"from keras.layers import Input, Lambda, GlobalAvgPool2D, Dense from keras.models import Model from keras.applications.resnet50",
"# https://doi.org/10.1109/CVPR.2016.90. def resnet50_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda",
"Computer Vision and Pattern # Recognition, 2017: pp. 1251–1258. def xception_model(input_shape=(None, None), num_classes=2):",
"Vision and Pattern # Recognition, 2018: pp. 4510–4520. def mobilenetv2_model(input_shape=(None, None), num_classes=2): input_gray",
"on Machine Learning, 2019. def efficientnetb5_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb =",
"Recognition, 2017: pp. 1251–1258. def xception_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb =",
"from keras.applications.mobilenet_v2 import MobileNetV2 from libs.keras_efficientnets.efficientnet import EfficientNetB5 warnings.filterwarnings(\"ignore\") # Resnet50 # <NAME>,",
"Computer Vision and Pattern # Recognition, 2018: pp. 8697–8710. def nasnetmobile_model(input_shape=(None, None), num_classes=2):",
"2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), IEEE, Las Vegas,",
"GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # Xception",
"xception_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements( K.expand_dims(",
"Convolutional Neural Networks, # in: International Conference on Machine Learning, 2019. def efficientnetb5_model(input_shape=(None,",
"= nasnetmobile_model((64, 64), 2) #model = mobilenetv2_model((64, 64), 2) #model = efficientnetb5_model((64, 64),",
"2019. def efficientnetb5_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x:",
"= NASNetMobile(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input,",
"linear bottlenecks, # in: Proceedings of the IEEE Conference on Computer Vision and",
"residuals and linear bottlenecks, # in: Proceedings of the IEEE Conference on Computer",
"Learning, 2019. def efficientnetb5_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda",
"def resnet50_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements(",
"return model # EfficientNet-B5 # <NAME>, <NAME>, EfficientNet: Rethinking Model Scaling for Convolutional",
"outputs=predict) return model # NASNetMobile # [1]<NAME>, <NAME>, <NAME>, <NAME>, Learning transferable architectures",
"keras.backend as K from keras.layers import Input, Lambda, GlobalAvgPool2D, Dense from keras.models import",
"keras.applications.xception import Xception from keras.applications.nasnet import NASNetMobile from keras.applications.mobilenet_v2 import MobileNetV2 from libs.keras_efficientnets.efficientnet",
"64), 2) #model = xception_model((64, 64), 2) #model = nasnetmobile_model((64, 64), 2) #model",
"the IEEE Conference on Computer Vision and Pattern # Recognition, 2018: pp. 4510–4520.",
"model = Model(inputs=base_model.input, outputs=predict) return model # EfficientNet-B5 # <NAME>, <NAME>, EfficientNet: Rethinking",
"predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # MobileNetV2 #",
"Transfer Convolutional Neural Network (DTCNN) Created on: 2019/12/31 22:01 @File: models.py @Author:<NAME> (<EMAIL>",
"Xception: Deep learning with depthwise separable convolutions, # in: Proceedings of the IEEE",
"Lambda, GlobalAvgPool2D, Dense from keras.models import Model from keras.applications.resnet50 import ResNet50 from keras.applications.xception",
"= Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model #Test DTCNN functions #if",
"Proceedings of the IEEE Conference on Computer Vision and Pattern # Recognition, 2017:",
"Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # NASNetMobile # [1]<NAME>, <NAME>,",
"= Model(inputs=base_model.input, outputs=predict) return model #Test DTCNN functions #if __name__ == '__main__': #model",
"ResNet50 from keras.applications.xception import Xception from keras.applications.nasnet import NASNetMobile from keras.applications.mobilenet_v2 import MobileNetV2",
"model #Test DTCNN functions #if __name__ == '__main__': #model = resnet50_model((64, 64), 2)",
"3))(input_gray) base_model = ResNet50(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model",
"learning with depthwise separable convolutions, # in: Proceedings of the IEEE Conference on",
"3, 3))(input_gray) base_model = NASNetMobile(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output)",
"predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model #Test DTCNN functions",
"base_model = ResNet50(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model =",
"nasnetmobile_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements( K.expand_dims(",
"import NASNetMobile from keras.applications.mobilenet_v2 import MobileNetV2 from libs.keras_efficientnets.efficientnet import EfficientNetB5 warnings.filterwarnings(\"ignore\") # Resnet50",
"predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # EfficientNet-B5 #",
"warnings.filterwarnings(\"ignore\") # Resnet50 # <NAME>, <NAME>, <NAME>, <NAME>, Deep Residual Learning for Image",
"Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model",
"Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = EfficientNetB5(include_top=False, input_tensor=input_fakeRgb)",
"\"\"\"Deep Transfer Convolutional Neural Network (DTCNN) Created on: 2019/12/31 22:01 @File: models.py @Author:<NAME>",
"import warnings import keras.backend as K from keras.layers import Input, Lambda, GlobalAvgPool2D, Dense",
"pp. 4510–4520. def mobilenetv2_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda",
"lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = Xception(include_top=False, input_tensor=input_fakeRgb) output",
"__name__ == '__main__': #model = resnet50_model((64, 64), 2) #model = xception_model((64, 64), 2)",
"and linear bottlenecks, # in: Proceedings of the IEEE Conference on Computer Vision",
"base_model = NASNetMobile(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model =",
"efficientnetb5_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements( K.expand_dims(",
"EfficientNetB5(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict)",
"@Requirement: Python-3.7.4, TensorFlow-1.4, Kears-2.2.4 \"\"\" import warnings import keras.backend as K from keras.layers",
"Model(inputs=base_model.input, outputs=predict) return model #Test DTCNN functions #if __name__ == '__main__': #model =",
"Computer Vision and Pattern # Recognition, 2018: pp. 4510–4520. def mobilenetv2_model(input_shape=(None, None), num_classes=2):",
"3, 3))(input_gray) base_model = Xception(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output)",
"MobileNetV2 # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Mobilenetv2: Inverted residuals and linear bottlenecks,",
"<NAME>, <NAME>, <NAME>, <NAME>, Deep Residual Learning for Image Recognition, # in: 2016",
"= Model(inputs=base_model.input, outputs=predict) return model # MobileNetV2 # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,",
"x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = MobileNetV2(include_top=False, input_tensor=input_fakeRgb) output =",
"Residual Learning for Image Recognition, # in: 2016 IEEE Conference on Computer Vision",
"Pattern Recognition (CVPR), IEEE, Las Vegas, NV, USA, 2016: pp. 770–778. # https://doi.org/10.1109/CVPR.2016.90.",
"\"\"\" import warnings import keras.backend as K from keras.layers import Input, Lambda, GlobalAvgPool2D,",
"Input, Lambda, GlobalAvgPool2D, Dense from keras.models import Model from keras.applications.resnet50 import ResNet50 from",
"Model(inputs=base_model.input, outputs=predict) return model # EfficientNet-B5 # <NAME>, <NAME>, EfficientNet: Rethinking Model Scaling",
"64), 2) #model = mobilenetv2_model((64, 64), 2) #model = efficientnetb5_model((64, 64), 2) #model.summary()",
"model = Model(inputs=base_model.input, outputs=predict) return model # NASNetMobile # [1]<NAME>, <NAME>, <NAME>, <NAME>,",
"2019-2020 HUST. All Rights Reserved. @Requirement: Python-3.7.4, TensorFlow-1.4, Kears-2.2.4 \"\"\" import warnings import",
"IEEE, Las Vegas, NV, USA, 2016: pp. 770–778. # https://doi.org/10.1109/CVPR.2016.90. def resnet50_model(input_shape=(None, None),",
"GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model #Test DTCNN",
"Xception(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict)",
"GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # EfficientNet-B5",
"Pattern # Recognition, 2017: pp. 1251–1258. def xception_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape)",
"Image Recognition, # in: 2016 IEEE Conference on Computer Vision and Pattern Recognition",
"input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3,",
"import Model from keras.applications.resnet50 import ResNet50 from keras.applications.xception import Xception from keras.applications.nasnet import",
"<NAME>, <NAME>, EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks, # in: International",
"3), 3, 3))(input_gray) base_model = NASNetMobile(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes,",
"in: International Conference on Machine Learning, 2019. def efficientnetb5_model(input_shape=(None, None), num_classes=2): input_gray =",
"K.expand_dims( x, 3), 3, 3))(input_gray) base_model = ResNet50(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict",
"recognition, # in: Proceedings of the IEEE Conference on Computer Vision and Pattern",
"<reponame>hxf1228/dtcnn_elm_lnd \"\"\"Deep Transfer Convolutional Neural Network (DTCNN) Created on: 2019/12/31 22:01 @File: models.py",
"import keras.backend as K from keras.layers import Input, Lambda, GlobalAvgPool2D, Dense from keras.models",
"22:01 @File: models.py @Author:<NAME> (<EMAIL> & <EMAIL>) @Copy Right: Copyright © 2019-2020 HUST.",
"3))(input_gray) base_model = EfficientNetB5(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model",
"model = Model(inputs=base_model.input, outputs=predict) return model # MobileNetV2 # <NAME>, <NAME>, <NAME>, <NAME>,",
"© 2019-2020 HUST. All Rights Reserved. @Requirement: Python-3.7.4, TensorFlow-1.4, Kears-2.2.4 \"\"\" import warnings",
"resnet50_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x: K.repeat_elements( K.expand_dims(",
"IEEE Conference on Computer Vision and Pattern # Recognition, 2018: pp. 4510–4520. def",
"3, 3))(input_gray) base_model = MobileNetV2(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output)",
"Neural Network (DTCNN) Created on: 2019/12/31 22:01 @File: models.py @Author:<NAME> (<EMAIL> & <EMAIL>)",
"x, 3), 3, 3))(input_gray) base_model = EfficientNetB5(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict =",
"import Xception from keras.applications.nasnet import NASNetMobile from keras.applications.mobilenet_v2 import MobileNetV2 from libs.keras_efficientnets.efficientnet import",
"Scaling for Convolutional Neural Networks, # in: International Conference on Machine Learning, 2019.",
"2) #model = xception_model((64, 64), 2) #model = nasnetmobile_model((64, 64), 2) #model =",
"keras.layers import Input, Lambda, GlobalAvgPool2D, Dense from keras.models import Model from keras.applications.resnet50 import",
"<NAME>, <NAME>, <NAME>, <NAME>, Mobilenetv2: Inverted residuals and linear bottlenecks, # in: Proceedings",
"= Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # NASNetMobile # [1]<NAME>,",
"= Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = ResNet50(include_top=False,",
"= Xception(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input,",
"return model #Test DTCNN functions #if __name__ == '__main__': #model = resnet50_model((64, 64),",
"#model = xception_model((64, 64), 2) #model = nasnetmobile_model((64, 64), 2) #model = mobilenetv2_model((64,",
"(<EMAIL> & <EMAIL>) @Copy Right: Copyright © 2019-2020 HUST. All Rights Reserved. @Requirement:",
"K.expand_dims( x, 3), 3, 3))(input_gray) base_model = EfficientNetB5(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict",
"Conference on Computer Vision and Pattern Recognition (CVPR), IEEE, Las Vegas, NV, USA,",
"= MobileNetV2(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input,",
"resnet50_model((64, 64), 2) #model = xception_model((64, 64), 2) #model = nasnetmobile_model((64, 64), 2)",
"and Pattern # Recognition, 2018: pp. 8697–8710. def nasnetmobile_model(input_shape=(None, None), num_classes=2): input_gray =",
"Right: Copyright © 2019-2020 HUST. All Rights Reserved. @Requirement: Python-3.7.4, TensorFlow-1.4, Kears-2.2.4 \"\"\"",
"IEEE Conference on Computer Vision and Pattern # Recognition, 2018: pp. 8697–8710. def",
"Rethinking Model Scaling for Convolutional Neural Networks, # in: International Conference on Machine",
"= resnet50_model((64, 64), 2) #model = xception_model((64, 64), 2) #model = nasnetmobile_model((64, 64),",
"= Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = EfficientNetB5(include_top=False,",
"# <NAME>, <NAME>, EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks, # in:",
"model # NASNetMobile # [1]<NAME>, <NAME>, <NAME>, <NAME>, Learning transferable architectures for scalable",
"Model from keras.applications.resnet50 import ResNet50 from keras.applications.xception import Xception from keras.applications.nasnet import NASNetMobile",
"<NAME>, Learning transferable architectures for scalable image recognition, # in: Proceedings of the",
"Conference on Computer Vision and Pattern # Recognition, 2018: pp. 4510–4520. def mobilenetv2_model(input_shape=(None,",
"# in: International Conference on Machine Learning, 2019. def efficientnetb5_model(input_shape=(None, None), num_classes=2): input_gray",
"K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = NASNetMobile(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output)",
"x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = NASNetMobile(include_top=False, input_tensor=input_fakeRgb) output =",
"HUST. All Rights Reserved. @Requirement: Python-3.7.4, TensorFlow-1.4, Kears-2.2.4 \"\"\" import warnings import keras.backend",
"<NAME>, <NAME>, Deep Residual Learning for Image Recognition, # in: 2016 IEEE Conference",
"pp. 8697–8710. def nasnetmobile_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda",
"@Author:<NAME> (<EMAIL> & <EMAIL>) @Copy Right: Copyright © 2019-2020 HUST. All Rights Reserved.",
"<EMAIL>) @Copy Right: Copyright © 2019-2020 HUST. All Rights Reserved. @Requirement: Python-3.7.4, TensorFlow-1.4,",
"x, 3), 3, 3))(input_gray) base_model = Xception(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict =",
"<NAME>, <NAME>, <NAME>, Mobilenetv2: Inverted residuals and linear bottlenecks, # in: Proceedings of",
"Deep learning with depthwise separable convolutions, # in: Proceedings of the IEEE Conference",
"K from keras.layers import Input, Lambda, GlobalAvgPool2D, Dense from keras.models import Model from",
"= GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model #Test",
"# F. Chollet, Xception: Deep learning with depthwise separable convolutions, # in: Proceedings",
"Xception # F. Chollet, Xception: Deep learning with depthwise separable convolutions, # in:",
"8697–8710. def nasnetmobile_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x:",
"return model # MobileNetV2 # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Mobilenetv2: Inverted residuals",
"input_fakeRgb = Lambda( lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model =",
"2018: pp. 4510–4520. def mobilenetv2_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda(",
"for scalable image recognition, # in: Proceedings of the IEEE Conference on Computer",
"= EfficientNetB5(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input,",
"transferable architectures for scalable image recognition, # in: Proceedings of the IEEE Conference",
"All Rights Reserved. @Requirement: Python-3.7.4, TensorFlow-1.4, Kears-2.2.4 \"\"\" import warnings import keras.backend as",
"= ResNet50(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input,",
"# NASNetMobile # [1]<NAME>, <NAME>, <NAME>, <NAME>, Learning transferable architectures for scalable image",
"from keras.applications.xception import Xception from keras.applications.nasnet import NASNetMobile from keras.applications.mobilenet_v2 import MobileNetV2 from",
"pp. 1251–1258. def xception_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda",
"# <NAME>, <NAME>, <NAME>, <NAME>, Deep Residual Learning for Image Recognition, # in:",
"keras.models import Model from keras.applications.resnet50 import ResNet50 from keras.applications.xception import Xception from keras.applications.nasnet",
"from keras.applications.resnet50 import ResNet50 from keras.applications.xception import Xception from keras.applications.nasnet import NASNetMobile from",
"== '__main__': #model = resnet50_model((64, 64), 2) #model = xception_model((64, 64), 2) #model",
"outputs=predict) return model # MobileNetV2 # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Mobilenetv2: Inverted",
"Vegas, NV, USA, 2016: pp. 770–778. # https://doi.org/10.1109/CVPR.2016.90. def resnet50_model(input_shape=(None, None), num_classes=2): input_gray",
"base_model = Xception(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model =",
"model = Model(inputs=base_model.input, outputs=predict) return model #Test DTCNN functions #if __name__ == '__main__':",
"Rights Reserved. @Requirement: Python-3.7.4, TensorFlow-1.4, Kears-2.2.4 \"\"\" import warnings import keras.backend as K",
"Xception from keras.applications.nasnet import NASNetMobile from keras.applications.mobilenet_v2 import MobileNetV2 from libs.keras_efficientnets.efficientnet import EfficientNetB5",
"Pattern # Recognition, 2018: pp. 8697–8710. def nasnetmobile_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape)",
"for Image Recognition, # in: 2016 IEEE Conference on Computer Vision and Pattern",
"Conference on Computer Vision and Pattern # Recognition, 2017: pp. 1251–1258. def xception_model(input_shape=(None,",
"# Recognition, 2018: pp. 8697–8710. def nasnetmobile_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb",
"@Copy Right: Copyright © 2019-2020 HUST. All Rights Reserved. @Requirement: Python-3.7.4, TensorFlow-1.4, Kears-2.2.4",
"# Xception # F. Chollet, Xception: Deep learning with depthwise separable convolutions, #",
"libs.keras_efficientnets.efficientnet import EfficientNetB5 warnings.filterwarnings(\"ignore\") # Resnet50 # <NAME>, <NAME>, <NAME>, <NAME>, Deep Residual",
"x, 3), 3, 3))(input_gray) base_model = NASNetMobile(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict =",
"Recognition, 2018: pp. 8697–8710. def nasnetmobile_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb =",
"and Pattern # Recognition, 2017: pp. 1251–1258. def xception_model(input_shape=(None, None), num_classes=2): input_gray =",
"Dense(num_classes, activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # Xception # F. Chollet,",
"lambda x: K.repeat_elements( K.expand_dims( x, 3), 3, 3))(input_gray) base_model = NASNetMobile(include_top=False, input_tensor=input_fakeRgb) output",
"activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # Xception # F. Chollet, Xception:",
"# [1]<NAME>, <NAME>, <NAME>, <NAME>, Learning transferable architectures for scalable image recognition, #",
"return model # NASNetMobile # [1]<NAME>, <NAME>, <NAME>, <NAME>, Learning transferable architectures for",
"import ResNet50 from keras.applications.xception import Xception from keras.applications.nasnet import NASNetMobile from keras.applications.mobilenet_v2 import",
"3))(input_gray) base_model = MobileNetV2(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict = Dense(num_classes, activation='softmax')(output) model",
"outputs=predict) return model #Test DTCNN functions #if __name__ == '__main__': #model = resnet50_model((64,",
"GlobalAvgPool2D, Dense from keras.models import Model from keras.applications.resnet50 import ResNet50 from keras.applications.xception import",
"K.expand_dims( x, 3), 3, 3))(input_gray) base_model = Xception(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict",
"outputs=predict) return model # EfficientNet-B5 # <NAME>, <NAME>, EfficientNet: Rethinking Model Scaling for",
"K.expand_dims( x, 3), 3, 3))(input_gray) base_model = MobileNetV2(include_top=False, input_tensor=input_fakeRgb) output = GlobalAvgPool2D()(base_model.output) predict",
"# MobileNetV2 # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Mobilenetv2: Inverted residuals and linear",
"activation='softmax')(output) model = Model(inputs=base_model.input, outputs=predict) return model # EfficientNet-B5 # <NAME>, <NAME>, EfficientNet:",
"IEEE Conference on Computer Vision and Pattern Recognition (CVPR), IEEE, Las Vegas, NV,",
"of the IEEE Conference on Computer Vision and Pattern # Recognition, 2018: pp.",
"Pattern # Recognition, 2018: pp. 4510–4520. def mobilenetv2_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape)",
"1251–1258. def xception_model(input_shape=(None, None), num_classes=2): input_gray = Input(shape=input_shape) input_fakeRgb = Lambda( lambda x:"
] |
[
"visualize method is maximum log likelihood solution obtained by the model-fit so far",
"outputs are stored, visualization, and the pickled objects used by the aggregator output",
"as np import autofit as af \"\"\" The `analysis.py` module contains the dataset",
") except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, y=self.data, yerr=self.noise_map, color=\"k\", ecolor=\"k\", elinewidth=1,",
"numpy as np import autofit as af \"\"\" The `analysis.py` module contains the",
"model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance ) except TypeError: model_data_1d =",
"containing the noise values of the data, used for computing the goodness of",
"plt import numpy as np import autofit as af \"\"\" The `analysis.py` module",
"instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, y=self.data, yerr=self.noise_map, color=\"k\", ecolor=\"k\", elinewidth=1, capsize=2, ) plt.plot(range(self.data.shape[0]), model_data_1d, color=\"r\")",
"= (residual_map / self.noise_map) ** 2.0 log_likelihood = -0.5 * sum(chi_squared_map) return log_likelihood",
"\"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance",
"computing the goodness of fit metric. \"\"\" super().__init__() self.data = data self.noise_map =",
"as plt import numpy as np import autofit as af \"\"\" The `analysis.py`",
"via a non-linear search). during_analysis If True the visualization is being performed midway",
"numpy array containing the data (e.g. a noisy 1D Gaussian) fitted in the",
"residual_map = self.data - model_data_1d chi_squared_map = (residual_map / self.noise_map) ** 2.0 log_likelihood",
"A 1D numpy array containing the data (e.g. a noisy 1D Gaussian) fitted",
"model-fit is going. For your model-fitting problem this function will be overwritten with",
"plt.title(\"Dynesty model fit to 1D Gaussian + Exponential dataset.\") plt.xlabel(\"x values of profile\")",
"import autofit as af \"\"\" The `analysis.py` module contains the dataset and log",
"fit so far.. The `instance` passed into the visualize method is maximum log",
"noise_map:np.ndarray): \"\"\" In this example the `Analysis` object only contains the data and",
"the data (e.g. a noisy 1D Gaussian) fitted in the workspace examples. noise_map",
"the data by this analysis (whose parameters have been set via a non-linear",
"profiles to the dataset. Parameters ---------- instance : af.Collection The model instances of",
"containing the data (e.g. a noisy 1D Gaussian) fitted in the workspace examples.",
"objects used by the aggregator output by this function. instance An instance of",
"of fit metric. \"\"\" super().__init__() self.data = data self.noise_map = noise_map def log_likelihood_function(self,",
"stored, visualization, and the pickled objects used by the aggregator output by this",
"the noise values of the data, used for computing the goodness of fit",
"to the dataset. Parameters ---------- instance : af.Collection The model instances of the",
") except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d chi_squared_map =",
"overwritten with plotting functions specific to your problem. Parameters ---------- paths The PyAutoFit",
"1D Gaussian) fitted in the workspace examples. noise_map A 1D numpy array containing",
"change which images are output. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum(",
"log likelihood function which given a model instance (set up by the non-linear",
"plotting functions specific to your problem. Parameters ---------- paths The PyAutoFit paths object",
"output by this function. instance An instance of the model that is being",
"this function will be overwritten with plotting functions specific to your problem. Parameters",
"with plotting functions specific to your problem. Parameters ---------- paths The PyAutoFit paths",
"the non-linear search outputs are stored, visualization, and the pickled objects used by",
"If True the visualization is being performed midway through the non-linear search before",
"been set via a non-linear search). during_analysis If True the visualization is being",
"search and is used to output images indicating the quality of the fit",
"af \"\"\" The `analysis.py` module contains the dataset and log likelihood function which",
"all paths, e.g. where the non-linear search outputs are stored, visualization, and the",
"is maximum log likelihood solution obtained by the model-fit so far and it",
"Returns ------- The log likelihood value indicating how well this model fit the",
"by the non-linear search) fits the dataset and returns the log likelihood of",
"the fit so far.. The `instance` passed into the visualize method is maximum",
"data A 1D numpy array containing the data (e.g. a noisy 1D Gaussian)",
"this example the `Analysis` object only contains the data and noise-map. It can",
"fitted in the workspace examples. noise_map A 1D numpy array containing the noise",
"import numpy as np import autofit as af \"\"\" The `analysis.py` module contains",
"TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, y=self.data, yerr=self.noise_map, color=\"k\", ecolor=\"k\", elinewidth=1, capsize=2, )",
"model fitting problems. Parameters ---------- data A 1D numpy array containing the data",
"= self.data - model_data_1d chi_squared_map = (residual_map / self.noise_map) ** 2.0 log_likelihood =",
"are stored, visualization, and the pickled objects used by the aggregator output by",
"complex data-sets and model fitting problems. Parameters ---------- data A 1D numpy array",
"the aggregator output by this function. instance An instance of the model that",
"which images are output. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues)",
"During a model-fit, the `visualize` method is called throughout the non-linear search and",
"instance An instance of the model that is being fitted to the data",
"model-fit so far and it can be used to provide on-the-fly images showing",
"for profile in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, y=self.data,",
"paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis : bool): \"\"\" During a model-fit, the `visualize`",
"the model-fit is going. For your model-fitting problem this function will be overwritten",
"noise-map. It can be easily extended, for more complex data-sets and model fitting",
"return log_likelihood def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis : bool): \"\"\" During",
"noise values of the data, used for computing the goodness of fit metric.",
"example the `Analysis` object only contains the data and noise-map. It can be",
"how well this model fit the dataset. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d",
"model that is being fitted to the data by this analysis (whose parameters",
"can be easily extended, for more complex data-sets and model fitting problems. Parameters",
"it can be used to provide on-the-fly images showing how the model-fit is",
"---------- instance : af.Collection The model instances of the profiles. Returns ------- The",
"color=\"k\", ecolor=\"k\", elinewidth=1, capsize=2, ) plt.plot(range(self.data.shape[0]), model_data_1d, color=\"r\") plt.title(\"Dynesty model fit to 1D",
"this model fit the dataset. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum(",
"Gaussian + Exponential dataset.\") plt.xlabel(\"x values of profile\") plt.ylabel(\"Profile normalization\") os.makedirs(paths.image_path, exist_ok=True) plt.savefig(path.join(paths.image_path,",
"the visualize method is maximum log likelihood solution obtained by the model-fit so",
"used by the aggregator output by this function. instance An instance of the",
"+ Exponential dataset.\") plt.xlabel(\"x values of profile\") plt.ylabel(\"Profile normalization\") os.makedirs(paths.image_path, exist_ok=True) plt.savefig(path.join(paths.image_path, \"model_fit.png\"))",
"quality of the fit so far.. The `instance` passed into the visualize method",
"fit of multiple profiles to the dataset. Parameters ---------- instance : af.Collection The",
"to the data by this analysis (whose parameters have been set via a",
"of a fit of multiple profiles to the dataset. Parameters ---------- instance :",
"solution obtained by the model-fit so far and it can be used to",
"likelihood function which given a model instance (set up by the non-linear search)",
"the profiles. Returns ------- The log likelihood value indicating how well this model",
"to provide on-the-fly images showing how the model-fit is going. For your model-fitting",
"indicating how well this model fit the dataset. \"\"\" xvalues = np.arange(self.data.shape[0]) try:",
"The `analysis.py` module contains the dataset and log likelihood function which given a",
"the visualization is being performed midway through the non-linear search before it is",
"which given a model instance (set up by the non-linear search) fits the",
"array containing the data (e.g. a noisy 1D Gaussian) fitted in the workspace",
"and noise-map. It can be easily extended, for more complex data-sets and model",
"object only contains the data and noise-map. It can be easily extended, for",
"dataset and returns the log likelihood of that model. \"\"\" class Analysis(af.Analysis): def",
"import matplotlib.pyplot as plt import numpy as np import autofit as af \"\"\"",
"\"\"\" class Analysis(af.Analysis): def __init__(self, data: np.ndarray, noise_map:np.ndarray): \"\"\" In this example the",
"except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d chi_squared_map = (residual_map",
"search before it is finished, which may change which images are output. \"\"\"",
"plt.errorbar( x=xvalues, y=self.data, yerr=self.noise_map, color=\"k\", ecolor=\"k\", elinewidth=1, capsize=2, ) plt.plot(range(self.data.shape[0]), model_data_1d, color=\"r\") plt.title(\"Dynesty",
"search). during_analysis If True the visualization is being performed midway through the non-linear",
"by this function. instance An instance of the model that is being fitted",
"far.. The `instance` passed into the visualize method is maximum log likelihood solution",
"autofit as af \"\"\" The `analysis.py` module contains the dataset and log likelihood",
"(whose parameters have been set via a non-linear search). during_analysis If True the",
"instance: af.ModelInstance, during_analysis : bool): \"\"\" During a model-fit, the `visualize` method is",
"profiles. Returns ------- The log likelihood value indicating how well this model fit",
"used to output images indicating the quality of the fit so far.. The",
"manages all paths, e.g. where the non-linear search outputs are stored, visualization, and",
"= noise_map def log_likelihood_function(self, instance: af.ModelInstance) -> float: \"\"\" Determine the log likelihood",
"`analysis.py` module contains the dataset and log likelihood function which given a model",
"An instance of the model that is being fitted to the data by",
"dataset and log likelihood function which given a model instance (set up by",
"capsize=2, ) plt.plot(range(self.data.shape[0]), model_data_1d, color=\"r\") plt.title(\"Dynesty model fit to 1D Gaussian + Exponential",
"profile in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data -",
"goodness of fit metric. \"\"\" super().__init__() self.data = data self.noise_map = noise_map def",
"data and noise-map. It can be easily extended, for more complex data-sets and",
"paths The PyAutoFit paths object which manages all paths, e.g. where the non-linear",
"1D numpy array containing the noise values of the data, used for computing",
"extended, for more complex data-sets and model fitting problems. Parameters ---------- data A",
"profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues,",
"the dataset and log likelihood function which given a model instance (set up",
"= instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d chi_squared_map = (residual_map / self.noise_map) **",
"how the model-fit is going. For your model-fitting problem this function will be",
"be used to provide on-the-fly images showing how the model-fit is going. For",
"\"\"\" super().__init__() self.data = data self.noise_map = noise_map def log_likelihood_function(self, instance: af.ModelInstance) ->",
"of that model. \"\"\" class Analysis(af.Analysis): def __init__(self, data: np.ndarray, noise_map:np.ndarray): \"\"\" In",
"For your model-fitting problem this function will be overwritten with plotting functions specific",
"the dataset and returns the log likelihood of that model. \"\"\" class Analysis(af.Analysis):",
"fit the dataset. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for",
"self.noise_map) ** 2.0 log_likelihood = -0.5 * sum(chi_squared_map) return log_likelihood def visualize(self, paths:",
"af.DirectoryPaths, instance: af.ModelInstance, during_analysis : bool): \"\"\" During a model-fit, the `visualize` method",
"self.data - model_data_1d chi_squared_map = (residual_map / self.noise_map) ** 2.0 log_likelihood = -0.5",
"likelihood of that model. \"\"\" class Analysis(af.Analysis): def __init__(self, data: np.ndarray, noise_map:np.ndarray): \"\"\"",
"log_likelihood = -0.5 * sum(chi_squared_map) return log_likelihood def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance,",
"log_likelihood def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis : bool): \"\"\" During a",
"object which manages all paths, e.g. where the non-linear search outputs are stored,",
"set via a non-linear search). during_analysis If True the visualization is being performed",
"output. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in",
"model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d chi_squared_map = (residual_map / self.noise_map)",
"module contains the dataset and log likelihood function which given a model instance",
"non-linear search and is used to output images indicating the quality of the",
"def log_likelihood_function(self, instance: af.ModelInstance) -> float: \"\"\" Determine the log likelihood of a",
"maximum log likelihood solution obtained by the model-fit so far and it can",
"given a model instance (set up by the non-linear search) fits the dataset",
"is going. For your model-fitting problem this function will be overwritten with plotting",
"analysis (whose parameters have been set via a non-linear search). during_analysis If True",
"\"\"\" During a model-fit, the `visualize` method is called throughout the non-linear search",
": af.Collection The model instances of the profiles. Returns ------- The log likelihood",
"that model. \"\"\" class Analysis(af.Analysis): def __init__(self, data: np.ndarray, noise_map:np.ndarray): \"\"\" In this",
"model instances of the profiles. Returns ------- The log likelihood value indicating how",
"the non-linear search before it is finished, which may change which images are",
"= instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, y=self.data, yerr=self.noise_map, color=\"k\", ecolor=\"k\", elinewidth=1, capsize=2, ) plt.plot(range(self.data.shape[0]), model_data_1d,",
"plt.plot(range(self.data.shape[0]), model_data_1d, color=\"r\") plt.title(\"Dynesty model fit to 1D Gaussian + Exponential dataset.\") plt.xlabel(\"x",
"and log likelihood function which given a model instance (set up by the",
"instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, y=self.data, yerr=self.noise_map, color=\"k\", ecolor=\"k\",",
"os import matplotlib.pyplot as plt import numpy as np import autofit as af",
"visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis : bool): \"\"\" During a model-fit, the",
"model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, y=self.data, yerr=self.noise_map, color=\"k\", ecolor=\"k\", elinewidth=1, capsize=2, ) plt.plot(range(self.data.shape[0]),",
"contains the dataset and log likelihood function which given a model instance (set",
"non-linear search). during_analysis If True the visualization is being performed midway through the",
"\"\"\" In this example the `Analysis` object only contains the data and noise-map.",
"array containing the noise values of the data, used for computing the goodness",
"non-linear search outputs are stored, visualization, and the pickled objects used by the",
"likelihood value indicating how well this model fit the dataset. \"\"\" xvalues =",
"y=self.data, yerr=self.noise_map, color=\"k\", ecolor=\"k\", elinewidth=1, capsize=2, ) plt.plot(range(self.data.shape[0]), model_data_1d, color=\"r\") plt.title(\"Dynesty model fit",
"in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d",
"fit to 1D Gaussian + Exponential dataset.\") plt.xlabel(\"x values of profile\") plt.ylabel(\"Profile normalization\")",
"used to provide on-the-fly images showing how the model-fit is going. For your",
"the workspace examples. noise_map A 1D numpy array containing the noise values of",
"the pickled objects used by the aggregator output by this function. instance An",
"and is used to output images indicating the quality of the fit so",
"a fit of multiple profiles to the dataset. Parameters ---------- instance : af.Collection",
"contains the data and noise-map. It can be easily extended, for more complex",
"Gaussian) fitted in the workspace examples. noise_map A 1D numpy array containing the",
"before it is finished, which may change which images are output. \"\"\" xvalues",
"fit metric. \"\"\" super().__init__() self.data = data self.noise_map = noise_map def log_likelihood_function(self, instance:",
"a model-fit, the `visualize` method is called throughout the non-linear search and is",
"your model-fitting problem this function will be overwritten with plotting functions specific to",
"The PyAutoFit paths object which manages all paths, e.g. where the non-linear search",
"more complex data-sets and model fitting problems. Parameters ---------- data A 1D numpy",
"sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar(",
"the data, used for computing the goodness of fit metric. \"\"\" super().__init__() self.data",
"import os import matplotlib.pyplot as plt import numpy as np import autofit as",
"`Analysis` object only contains the data and noise-map. It can be easily extended,",
"np import autofit as af \"\"\" The `analysis.py` module contains the dataset and",
"color=\"r\") plt.title(\"Dynesty model fit to 1D Gaussian + Exponential dataset.\") plt.xlabel(\"x values of",
"the dataset. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile",
"data: np.ndarray, noise_map:np.ndarray): \"\"\" In this example the `Analysis` object only contains the",
"instance : af.Collection The model instances of the profiles. Returns ------- The log",
"model_data_1d chi_squared_map = (residual_map / self.noise_map) ** 2.0 log_likelihood = -0.5 * sum(chi_squared_map)",
"is finished, which may change which images are output. \"\"\" xvalues = np.arange(self.data.shape[0])",
"this function. instance An instance of the model that is being fitted to",
"can be used to provide on-the-fly images showing how the model-fit is going.",
"data (e.g. a noisy 1D Gaussian) fitted in the workspace examples. noise_map A",
"Parameters ---------- instance : af.Collection The model instances of the profiles. Returns -------",
"log likelihood solution obtained by the model-fit so far and it can be",
"The `instance` passed into the visualize method is maximum log likelihood solution obtained",
"images showing how the model-fit is going. For your model-fitting problem this function",
"float: \"\"\" Determine the log likelihood of a fit of multiple profiles to",
"may change which images are output. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d =",
"up by the non-linear search) fits the dataset and returns the log likelihood",
"= data self.noise_map = noise_map def log_likelihood_function(self, instance: af.ModelInstance) -> float: \"\"\" Determine",
"The log likelihood value indicating how well this model fit the dataset. \"\"\"",
"to 1D Gaussian + Exponential dataset.\") plt.xlabel(\"x values of profile\") plt.ylabel(\"Profile normalization\") os.makedirs(paths.image_path,",
"provide on-the-fly images showing how the model-fit is going. For your model-fitting problem",
"a model instance (set up by the non-linear search) fits the dataset and",
"elinewidth=1, capsize=2, ) plt.plot(range(self.data.shape[0]), model_data_1d, color=\"r\") plt.title(\"Dynesty model fit to 1D Gaussian +",
"np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance ) except TypeError:",
"search outputs are stored, visualization, and the pickled objects used by the aggregator",
"which may change which images are output. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d",
"---------- paths The PyAutoFit paths object which manages all paths, e.g. where the",
"for computing the goodness of fit metric. \"\"\" super().__init__() self.data = data self.noise_map",
"performed midway through the non-linear search before it is finished, which may change",
"likelihood of a fit of multiple profiles to the dataset. Parameters ---------- instance",
"showing how the model-fit is going. For your model-fitting problem this function will",
"self.data = data self.noise_map = noise_map def log_likelihood_function(self, instance: af.ModelInstance) -> float: \"\"\"",
"PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs",
"the `Analysis` object only contains the data and noise-map. It can be easily",
"`visualize` method is called throughout the non-linear search and is used to output",
"profile in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, y=self.data, yerr=self.noise_map,",
"problems. Parameters ---------- data A 1D numpy array containing the data (e.g. a",
"values of the data, used for computing the goodness of fit metric. \"\"\"",
"ecolor=\"k\", elinewidth=1, capsize=2, ) plt.plot(range(self.data.shape[0]), model_data_1d, color=\"r\") plt.title(\"Dynesty model fit to 1D Gaussian",
"model. \"\"\" class Analysis(af.Analysis): def __init__(self, data: np.ndarray, noise_map:np.ndarray): \"\"\" In this example",
"obtained by the model-fit so far and it can be used to provide",
"matplotlib.pyplot as plt import numpy as np import autofit as af \"\"\" The",
"super().__init__() self.data = data self.noise_map = noise_map def log_likelihood_function(self, instance: af.ModelInstance) -> float:",
"easily extended, for more complex data-sets and model fitting problems. Parameters ---------- data",
"from os import path import os import matplotlib.pyplot as plt import numpy as",
"non-linear search before it is finished, which may change which images are output.",
"being fitted to the data by this analysis (whose parameters have been set",
"noisy 1D Gaussian) fitted in the workspace examples. noise_map A 1D numpy array",
"the dataset. Parameters ---------- instance : af.Collection The model instances of the profiles.",
"the model that is being fitted to the data by this analysis (whose",
"af.ModelInstance) -> float: \"\"\" Determine the log likelihood of a fit of multiple",
"-> float: \"\"\" Determine the log likelihood of a fit of multiple profiles",
"`instance` passed into the visualize method is maximum log likelihood solution obtained by",
"the log likelihood of that model. \"\"\" class Analysis(af.Analysis): def __init__(self, data: np.ndarray,",
"dataset. Parameters ---------- instance : af.Collection The model instances of the profiles. Returns",
"paths object which manages all paths, e.g. where the non-linear search outputs are",
"x=xvalues, y=self.data, yerr=self.noise_map, color=\"k\", ecolor=\"k\", elinewidth=1, capsize=2, ) plt.plot(range(self.data.shape[0]), model_data_1d, color=\"r\") plt.title(\"Dynesty model",
"parameters have been set via a non-linear search). during_analysis If True the visualization",
"log likelihood of a fit of multiple profiles to the dataset. Parameters ----------",
"as af \"\"\" The `analysis.py` module contains the dataset and log likelihood function",
"instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d chi_squared_map",
"value indicating how well this model fit the dataset. \"\"\" xvalues = np.arange(self.data.shape[0])",
"try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance ) except TypeError: model_data_1d",
"af.ModelInstance, during_analysis : bool): \"\"\" During a model-fit, the `visualize` method is called",
"the model-fit so far and it can be used to provide on-the-fly images",
"search) fits the dataset and returns the log likelihood of that model. \"\"\"",
"only contains the data and noise-map. It can be easily extended, for more",
"\"\"\" Determine the log likelihood of a fit of multiple profiles to the",
"dataset. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in",
"2.0 log_likelihood = -0.5 * sum(chi_squared_map) return log_likelihood def visualize(self, paths: af.DirectoryPaths, instance:",
"finished, which may change which images are output. \"\"\" xvalues = np.arange(self.data.shape[0]) try:",
"images indicating the quality of the fit so far.. The `instance` passed into",
"pickled objects used by the aggregator output by this function. instance An instance",
"during_analysis : bool): \"\"\" During a model-fit, the `visualize` method is called throughout",
"to output images indicating the quality of the fit so far.. The `instance`",
"(e.g. a noisy 1D Gaussian) fitted in the workspace examples. noise_map A 1D",
"a noisy 1D Gaussian) fitted in the workspace examples. noise_map A 1D numpy",
"of the model that is being fitted to the data by this analysis",
"instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d chi_squared_map = (residual_map / self.noise_map) ** 2.0",
"have been set via a non-linear search). during_analysis If True the visualization is",
"of the profiles. Returns ------- The log likelihood value indicating how well this",
"far and it can be used to provide on-the-fly images showing how the",
"- model_data_1d chi_squared_map = (residual_map / self.noise_map) ** 2.0 log_likelihood = -0.5 *",
"and model fitting problems. Parameters ---------- data A 1D numpy array containing the",
"Parameters ---------- data A 1D numpy array containing the data (e.g. a noisy",
"except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, y=self.data, yerr=self.noise_map, color=\"k\", ecolor=\"k\", elinewidth=1, capsize=2,",
"by the model-fit so far and it can be used to provide on-the-fly",
"the quality of the fit so far.. The `instance` passed into the visualize",
"is called throughout the non-linear search and is used to output images indicating",
"yerr=self.noise_map, color=\"k\", ecolor=\"k\", elinewidth=1, capsize=2, ) plt.plot(range(self.data.shape[0]), model_data_1d, color=\"r\") plt.title(\"Dynesty model fit to",
"and the pickled objects used by the aggregator output by this function. instance",
"visualization, and the pickled objects used by the aggregator output by this function.",
"* sum(chi_squared_map) return log_likelihood def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis : bool):",
"bool): \"\"\" During a model-fit, the `visualize` method is called throughout the non-linear",
"noise_map A 1D numpy array containing the noise values of the data, used",
"so far and it can be used to provide on-the-fly images showing how",
"/ self.noise_map) ** 2.0 log_likelihood = -0.5 * sum(chi_squared_map) return log_likelihood def visualize(self,",
"problem. Parameters ---------- paths The PyAutoFit paths object which manages all paths, e.g.",
"class Analysis(af.Analysis): def __init__(self, data: np.ndarray, noise_map:np.ndarray): \"\"\" In this example the `Analysis`",
"where the non-linear search outputs are stored, visualization, and the pickled objects used",
"data-sets and model fitting problems. Parameters ---------- data A 1D numpy array containing",
"Parameters ---------- paths The PyAutoFit paths object which manages all paths, e.g. where",
"likelihood solution obtained by the model-fit so far and it can be used",
"be easily extended, for more complex data-sets and model fitting problems. Parameters ----------",
"sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map",
"in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, y=self.data, yerr=self.noise_map, color=\"k\",",
"noise_map def log_likelihood_function(self, instance: af.ModelInstance) -> float: \"\"\" Determine the log likelihood of",
"instances of the profiles. Returns ------- The log likelihood value indicating how well",
"log likelihood value indicating how well this model fit the dataset. \"\"\" xvalues",
"that is being fitted to the data by this analysis (whose parameters have",
"for more complex data-sets and model fitting problems. Parameters ---------- data A 1D",
"instance: af.ModelInstance) -> float: \"\"\" Determine the log likelihood of a fit of",
"it is finished, which may change which images are output. \"\"\" xvalues =",
"os import path import os import matplotlib.pyplot as plt import numpy as np",
"def __init__(self, data: np.ndarray, noise_map:np.ndarray): \"\"\" In this example the `Analysis` object only",
"TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d chi_squared_map = (residual_map /",
"of the fit so far.. The `instance` passed into the visualize method is",
"used for computing the goodness of fit metric. \"\"\" super().__init__() self.data = data",
"np.ndarray, noise_map:np.ndarray): \"\"\" In this example the `Analysis` object only contains the data",
"for profile in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data",
"data, used for computing the goodness of fit metric. \"\"\" super().__init__() self.data =",
"** 2.0 log_likelihood = -0.5 * sum(chi_squared_map) return log_likelihood def visualize(self, paths: af.DirectoryPaths,",
": bool): \"\"\" During a model-fit, the `visualize` method is called throughout the",
"and it can be used to provide on-the-fly images showing how the model-fit",
"on-the-fly images showing how the model-fit is going. For your model-fitting problem this",
"profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map =",
"= sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)",
"problem this function will be overwritten with plotting functions specific to your problem.",
"the log likelihood of a fit of multiple profiles to the dataset. Parameters",
"multiple profiles to the dataset. Parameters ---------- instance : af.Collection The model instances",
"visualization is being performed midway through the non-linear search before it is finished,",
"---------- data A 1D numpy array containing the data (e.g. a noisy 1D",
"of multiple profiles to the dataset. Parameters ---------- instance : af.Collection The model",
"are output. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile",
"The model instances of the profiles. Returns ------- The log likelihood value indicating",
"model-fitting problem this function will be overwritten with plotting functions specific to your",
"chi_squared_map = (residual_map / self.noise_map) ** 2.0 log_likelihood = -0.5 * sum(chi_squared_map) return",
"model fit the dataset. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues)",
"xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance )",
"\"\"\" The `analysis.py` module contains the dataset and log likelihood function which given",
"function will be overwritten with plotting functions specific to your problem. Parameters ----------",
"log_likelihood_function(self, instance: af.ModelInstance) -> float: \"\"\" Determine the log likelihood of a fit",
"workspace examples. noise_map A 1D numpy array containing the noise values of the",
"indicating the quality of the fit so far.. The `instance` passed into the",
"model-fit, the `visualize` method is called throughout the non-linear search and is used",
"during_analysis If True the visualization is being performed midway through the non-linear search",
"function. instance An instance of the model that is being fitted to the",
"instance of the model that is being fitted to the data by this",
"is being performed midway through the non-linear search before it is finished, which",
"metric. \"\"\" super().__init__() self.data = data self.noise_map = noise_map def log_likelihood_function(self, instance: af.ModelInstance)",
"function which given a model instance (set up by the non-linear search) fits",
"to your problem. Parameters ---------- paths The PyAutoFit paths object which manages all",
"method is maximum log likelihood solution obtained by the model-fit so far and",
"be overwritten with plotting functions specific to your problem. Parameters ---------- paths The",
"model_data_1d, color=\"r\") plt.title(\"Dynesty model fit to 1D Gaussian + Exponential dataset.\") plt.xlabel(\"x values",
"and returns the log likelihood of that model. \"\"\" class Analysis(af.Analysis): def __init__(self,",
"throughout the non-linear search and is used to output images indicating the quality",
"log likelihood of that model. \"\"\" class Analysis(af.Analysis): def __init__(self, data: np.ndarray, noise_map:np.ndarray):",
"is being fitted to the data by this analysis (whose parameters have been",
"well this model fit the dataset. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d =",
"def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis : bool): \"\"\" During a model-fit,",
"which manages all paths, e.g. where the non-linear search outputs are stored, visualization,",
"the non-linear search and is used to output images indicating the quality of",
"model instance (set up by the non-linear search) fits the dataset and returns",
"output images indicating the quality of the fit so far.. The `instance` passed",
"through the non-linear search before it is finished, which may change which images",
"going. For your model-fitting problem this function will be overwritten with plotting functions",
"by the aggregator output by this function. instance An instance of the model",
"(residual_map / self.noise_map) ** 2.0 log_likelihood = -0.5 * sum(chi_squared_map) return log_likelihood def",
"aggregator output by this function. instance An instance of the model that is",
"this analysis (whose parameters have been set via a non-linear search). during_analysis If",
"examples. noise_map A 1D numpy array containing the noise values of the data,",
"model fit to 1D Gaussian + Exponential dataset.\") plt.xlabel(\"x values of profile\") plt.ylabel(\"Profile",
"of the data, used for computing the goodness of fit metric. \"\"\" super().__init__()",
"import path import os import matplotlib.pyplot as plt import numpy as np import",
") plt.plot(range(self.data.shape[0]), model_data_1d, color=\"r\") plt.title(\"Dynesty model fit to 1D Gaussian + Exponential dataset.\")",
"is used to output images indicating the quality of the fit so far..",
"functions specific to your problem. Parameters ---------- paths The PyAutoFit paths object which",
"returns the log likelihood of that model. \"\"\" class Analysis(af.Analysis): def __init__(self, data:",
"specific to your problem. Parameters ---------- paths The PyAutoFit paths object which manages",
"data by this analysis (whose parameters have been set via a non-linear search).",
"the `visualize` method is called throughout the non-linear search and is used to",
"True the visualization is being performed midway through the non-linear search before it",
"instance (set up by the non-linear search) fits the dataset and returns the",
"being performed midway through the non-linear search before it is finished, which may",
"in the workspace examples. noise_map A 1D numpy array containing the noise values",
"self.noise_map = noise_map def log_likelihood_function(self, instance: af.ModelInstance) -> float: \"\"\" Determine the log",
"images are output. \"\"\" xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for",
"__init__(self, data: np.ndarray, noise_map:np.ndarray): \"\"\" In this example the `Analysis` object only contains",
"1D numpy array containing the data (e.g. a noisy 1D Gaussian) fitted in",
"called throughout the non-linear search and is used to output images indicating the",
"data self.noise_map = noise_map def log_likelihood_function(self, instance: af.ModelInstance) -> float: \"\"\" Determine the",
"e.g. where the non-linear search outputs are stored, visualization, and the pickled objects",
"1D Gaussian + Exponential dataset.\") plt.xlabel(\"x values of profile\") plt.ylabel(\"Profile normalization\") os.makedirs(paths.image_path, exist_ok=True)",
"a non-linear search). during_analysis If True the visualization is being performed midway through",
"so far.. The `instance` passed into the visualize method is maximum log likelihood",
"In this example the `Analysis` object only contains the data and noise-map. It",
"the goodness of fit metric. \"\"\" super().__init__() self.data = data self.noise_map = noise_map",
"will be overwritten with plotting functions specific to your problem. Parameters ---------- paths",
"It can be easily extended, for more complex data-sets and model fitting problems.",
"method is called throughout the non-linear search and is used to output images",
"paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled",
"fits the dataset and returns the log likelihood of that model. \"\"\" class",
"------- The log likelihood value indicating how well this model fit the dataset.",
"by this analysis (whose parameters have been set via a non-linear search). during_analysis",
"Analysis(af.Analysis): def __init__(self, data: np.ndarray, noise_map:np.ndarray): \"\"\" In this example the `Analysis` object",
"fitting problems. Parameters ---------- data A 1D numpy array containing the data (e.g.",
"path import os import matplotlib.pyplot as plt import numpy as np import autofit",
"your problem. Parameters ---------- paths The PyAutoFit paths object which manages all paths,",
"-0.5 * sum(chi_squared_map) return log_likelihood def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis :",
"the non-linear search) fits the dataset and returns the log likelihood of that",
"numpy array containing the noise values of the data, used for computing the",
"Determine the log likelihood of a fit of multiple profiles to the dataset.",
"(set up by the non-linear search) fits the dataset and returns the log",
"midway through the non-linear search before it is finished, which may change which",
"fitted to the data by this analysis (whose parameters have been set via",
"= -0.5 * sum(chi_squared_map) return log_likelihood def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis",
"sum(chi_squared_map) return log_likelihood def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis : bool): \"\"\"",
"Exponential dataset.\") plt.xlabel(\"x values of profile\") plt.ylabel(\"Profile normalization\") os.makedirs(paths.image_path, exist_ok=True) plt.savefig(path.join(paths.image_path, \"model_fit.png\")) plt.clf()",
"non-linear search) fits the dataset and returns the log likelihood of that model.",
"A 1D numpy array containing the noise values of the data, used for",
"af.Collection The model instances of the profiles. Returns ------- The log likelihood value",
"passed into the visualize method is maximum log likelihood solution obtained by the",
"= np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance ) except",
"into the visualize method is maximum log likelihood solution obtained by the model-fit",
"the data and noise-map. It can be easily extended, for more complex data-sets"
] |
[] |
[
"y = pieces return cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x, y=y) elif len(pieces) == 7:",
"header format {}.'.format(s)) def __init__(self, device, run=None, flowcell=None, flowcell_lane=None, tile=None, x=None, y=None): self.device",
"cls return wrapper no_retry = max_retry(0) class TaskWithPriorityMixin: \"\"\"Mixin that adds a --priority",
"if self.flowcell is None: return self.device, self.flowcell_lane return self.device, self.flowcell, self.flowcell_lane def parse_illumina_fastq_header(s):",
"remove_task_output(task): logger.info('Cleaning up %s...', repr(task)) for out in flatten_output(task): if hasattr(out, 'remove') and",
"return (not self.rerun or self._has_rerun) and super().complete() class CheckAfterCompleteMixin: \"\"\"Ensures that a task",
"flowcell_lane=None, tile=None, x=None, y=None): self.device = device self.run = run self.flowcell = flowcell",
"**kwds): super().__init__(*kwargs, **kwds) self._has_rerun = False def run(self): try: return super().run() finally: self._has_rerun",
"not self.complete(): raise RuntimeError('{} is not completed after successful run().'.format(repr(self))) return ret def",
"for a task that can be rerun regardless of its completion status. \"\"\"",
"self.device, self.flowcell_lane return self.device, self.flowcell, self.flowcell_lane def parse_illumina_fastq_header(s): return IlluminaFastqHeader(*s.split(':')) def max_retry(count): \"\"\"",
"self.tile = tile self.x = x self.y = y @property def batch_factor(self): if",
"return self.device, self.flowcell, self.flowcell_lane def parse_illumina_fastq_header(s): return IlluminaFastqHeader(*s.split(':')) def max_retry(count): \"\"\" Set the",
"= logging.getLogger('luigi-interface') class IlluminaFastqHeader: @classmethod def parse(cls, s): pieces = s.split(':') if len(pieces)",
"__init__(self, *kwargs, **kwds): super().__init__(*kwargs, **kwds) self._has_rerun = False def run(self): try: return super().run()",
"return cls return wrapper no_retry = max_retry(0) class TaskWithPriorityMixin: \"\"\"Mixin that adds a",
"run().\"\"\" def run(self): ret = super().run() if not self.complete(): raise RuntimeError('{} is not",
"raise TypeError('Unsupported Illumina FASTQ header format {}.'.format(s)) def __init__(self, device, run=None, flowcell=None, flowcell_lane=None,",
"raise RuntimeError('{} is not completed after successful run().'.format(repr(self))) return ret def remove_task_output(task): logger.info('Cleaning",
"class CheckAfterCompleteMixin: \"\"\"Ensures that a task is completed after a successful run().\"\"\" def",
"flatten_output(task): if hasattr(out, 'remove') and out.exists(): try: out.remove() logger.info('Removed %s.', repr(out)) except: logger.exception('Failed",
"self._has_rerun = True def complete(self): return (not self.rerun or self._has_rerun) and super().complete() class",
"def run(self): try: return super().run() finally: self._has_rerun = True def complete(self): return (not",
"Set the maximum number of time a task can be retried before being",
"for out in flatten_output(task): if hasattr(out, 'remove') and out.exists(): try: out.remove() logger.info('Removed %s.',",
"cls.retry_count = count return cls return wrapper no_retry = max_retry(0) class TaskWithPriorityMixin: \"\"\"Mixin",
"count return cls return wrapper no_retry = max_retry(0) class TaskWithPriorityMixin: \"\"\"Mixin that adds",
"task.\"\"\" priority = luigi.IntParameter(default=0, positional=False, significant=False) class RerunnableTaskMixin: \"\"\" Mixin for a task",
"ret = super().run() if not self.complete(): raise RuntimeError('{} is not completed after successful",
"None: return self.device, self.flowcell_lane return self.device, self.flowcell, self.flowcell_lane def parse_illumina_fastq_header(s): return IlluminaFastqHeader(*s.split(':')) def",
"--priority flag to a given task.\"\"\" priority = luigi.IntParameter(default=0, positional=False, significant=False) class RerunnableTaskMixin:",
"= run self.flowcell = flowcell self.flowcell_lane = flowcell_lane self.tile = tile self.x =",
"out in flatten_output(task): if hasattr(out, 'remove') and out.exists(): try: out.remove() logger.info('Removed %s.', repr(out))",
"pieces = s.split(':') if len(pieces) == 5: device, flowcell_lane, tile, x, y =",
"import logging import uuid import luigi from luigi.task import flatten_output from luigi.parameter import",
"if len(pieces) == 5: device, flowcell_lane, tile, x, y = pieces return cls(device,",
"repr(task)) for out in flatten_output(task): if hasattr(out, 'remove') and out.exists(): try: out.remove() logger.info('Removed",
"in flatten_output(task): if hasattr(out, 'remove') and out.exists(): try: out.remove() logger.info('Removed %s.', repr(out)) except:",
"completed after a successful run().\"\"\" def run(self): ret = super().run() if not self.complete():",
"**kwds) self._has_rerun = False def run(self): try: return super().run() finally: self._has_rerun = True",
"y=y) elif len(pieces) == 7: return cls(*pieces) else: raise TypeError('Unsupported Illumina FASTQ header",
"x=None, y=None): self.device = device self.run = run self.flowcell = flowcell self.flowcell_lane =",
"time a task can be retried before being disabled as per Luigi retry",
"priority = luigi.IntParameter(default=0, positional=False, significant=False) class RerunnableTaskMixin: \"\"\" Mixin for a task that",
"self._has_rerun = False def run(self): try: return super().run() finally: self._has_rerun = True def",
"self.complete(): raise RuntimeError('{} is not completed after successful run().'.format(repr(self))) return ret def remove_task_output(task):",
"tile=tile, x=x, y=y) elif len(pieces) == 7: return cls(*pieces) else: raise TypeError('Unsupported Illumina",
"self.flowcell_lane return self.device, self.flowcell, self.flowcell_lane def parse_illumina_fastq_header(s): return IlluminaFastqHeader(*s.split(':')) def max_retry(count): \"\"\" Set",
"import uuid import luigi from luigi.task import flatten_output from luigi.parameter import ParameterVisibility logger",
"run=None, flowcell=None, flowcell_lane=None, tile=None, x=None, y=None): self.device = device self.run = run self.flowcell",
"self.x = x self.y = y @property def batch_factor(self): if self.flowcell is None:",
"== 5: device, flowcell_lane, tile, x, y = pieces return cls(device, flowcell_lane=flowcell_lane, tile=tile,",
"def max_retry(count): \"\"\" Set the maximum number of time a task can be",
"maximum number of time a task can be retried before being disabled as",
"can be rerun regardless of its completion status. \"\"\" rerun = luigi.BoolParameter(default=False, positional=False,",
"significant=False) def __init__(self, *kwargs, **kwds): super().__init__(*kwargs, **kwds) self._has_rerun = False def run(self): try:",
"from luigi.task import flatten_output from luigi.parameter import ParameterVisibility logger = logging.getLogger('luigi-interface') class IlluminaFastqHeader:",
"self.run = run self.flowcell = flowcell self.flowcell_lane = flowcell_lane self.tile = tile self.x",
"try: return super().run() finally: self._has_rerun = True def complete(self): return (not self.rerun or",
"= luigi.IntParameter(default=0, positional=False, significant=False) class RerunnableTaskMixin: \"\"\" Mixin for a task that can",
"def run(self): ret = super().run() if not self.complete(): raise RuntimeError('{} is not completed",
"def parse(cls, s): pieces = s.split(':') if len(pieces) == 5: device, flowcell_lane, tile,",
"policy. \"\"\" def wrapper(cls): cls.retry_count = count return cls return wrapper no_retry =",
"successful run().'.format(repr(self))) return ret def remove_task_output(task): logger.info('Cleaning up %s...', repr(task)) for out in",
"from luigi.parameter import ParameterVisibility logger = logging.getLogger('luigi-interface') class IlluminaFastqHeader: @classmethod def parse(cls, s):",
"super().run() finally: self._has_rerun = True def complete(self): return (not self.rerun or self._has_rerun) and",
"flatten_output from luigi.parameter import ParameterVisibility logger = logging.getLogger('luigi-interface') class IlluminaFastqHeader: @classmethod def parse(cls,",
"luigi.IntParameter(default=0, positional=False, significant=False) class RerunnableTaskMixin: \"\"\" Mixin for a task that can be",
"complete(self): return (not self.rerun or self._has_rerun) and super().complete() class CheckAfterCompleteMixin: \"\"\"Ensures that a",
"run(self): ret = super().run() if not self.complete(): raise RuntimeError('{} is not completed after",
"%s...', repr(task)) for out in flatten_output(task): if hasattr(out, 'remove') and out.exists(): try: out.remove()",
"CheckAfterCompleteMixin: \"\"\"Ensures that a task is completed after a successful run().\"\"\" def run(self):",
"task can be retried before being disabled as per Luigi retry policy. \"\"\"",
"= device self.run = run self.flowcell = flowcell self.flowcell_lane = flowcell_lane self.tile =",
"def __init__(self, device, run=None, flowcell=None, flowcell_lane=None, tile=None, x=None, y=None): self.device = device self.run",
"\"\"\"Ensures that a task is completed after a successful run().\"\"\" def run(self): ret",
"s.split(':') if len(pieces) == 5: device, flowcell_lane, tile, x, y = pieces return",
"flowcell_lane self.tile = tile self.x = x self.y = y @property def batch_factor(self):",
"if hasattr(out, 'remove') and out.exists(): try: out.remove() logger.info('Removed %s.', repr(out)) except: logger.exception('Failed to",
"finally: self._has_rerun = True def complete(self): return (not self.rerun or self._has_rerun) and super().complete()",
"flowcell_lane=flowcell_lane, tile=tile, x=x, y=y) elif len(pieces) == 7: return cls(*pieces) else: raise TypeError('Unsupported",
"being disabled as per Luigi retry policy. \"\"\" def wrapper(cls): cls.retry_count = count",
"= count return cls return wrapper no_retry = max_retry(0) class TaskWithPriorityMixin: \"\"\"Mixin that",
"a task is completed after a successful run().\"\"\" def run(self): ret = super().run()",
"= s.split(':') if len(pieces) == 5: device, flowcell_lane, tile, x, y = pieces",
"is completed after a successful run().\"\"\" def run(self): ret = super().run() if not",
"s): pieces = s.split(':') if len(pieces) == 5: device, flowcell_lane, tile, x, y",
"significant=False) class RerunnableTaskMixin: \"\"\" Mixin for a task that can be rerun regardless",
"import ParameterVisibility logger = logging.getLogger('luigi-interface') class IlluminaFastqHeader: @classmethod def parse(cls, s): pieces =",
"= tile self.x = x self.y = y @property def batch_factor(self): if self.flowcell",
"and super().complete() class CheckAfterCompleteMixin: \"\"\"Ensures that a task is completed after a successful",
"tile, x, y = pieces return cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x, y=y) elif len(pieces)",
"TaskWithPriorityMixin: \"\"\"Mixin that adds a --priority flag to a given task.\"\"\" priority =",
"class RerunnableTaskMixin: \"\"\" Mixin for a task that can be rerun regardless of",
"Illumina FASTQ header format {}.'.format(s)) def __init__(self, device, run=None, flowcell=None, flowcell_lane=None, tile=None, x=None,",
"y @property def batch_factor(self): if self.flowcell is None: return self.device, self.flowcell_lane return self.device,",
"retried before being disabled as per Luigi retry policy. \"\"\" def wrapper(cls): cls.retry_count",
"run().'.format(repr(self))) return ret def remove_task_output(task): logger.info('Cleaning up %s...', repr(task)) for out in flatten_output(task):",
"return super().run() finally: self._has_rerun = True def complete(self): return (not self.rerun or self._has_rerun)",
"*kwargs, **kwds): super().__init__(*kwargs, **kwds) self._has_rerun = False def run(self): try: return super().run() finally:",
"or self._has_rerun) and super().complete() class CheckAfterCompleteMixin: \"\"\"Ensures that a task is completed after",
"cls(*pieces) else: raise TypeError('Unsupported Illumina FASTQ header format {}.'.format(s)) def __init__(self, device, run=None,",
"(not self.rerun or self._has_rerun) and super().complete() class CheckAfterCompleteMixin: \"\"\"Ensures that a task is",
"completed after successful run().'.format(repr(self))) return ret def remove_task_output(task): logger.info('Cleaning up %s...', repr(task)) for",
"rerun regardless of its completion status. \"\"\" rerun = luigi.BoolParameter(default=False, positional=False, significant=False) def",
"self.flowcell = flowcell self.flowcell_lane = flowcell_lane self.tile = tile self.x = x self.y",
"@classmethod def parse(cls, s): pieces = s.split(':') if len(pieces) == 5: device, flowcell_lane,",
"run(self): try: return super().run() finally: self._has_rerun = True def complete(self): return (not self.rerun",
"cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x, y=y) elif len(pieces) == 7: return cls(*pieces) else: raise",
"len(pieces) == 5: device, flowcell_lane, tile, x, y = pieces return cls(device, flowcell_lane=flowcell_lane,",
"is None: return self.device, self.flowcell_lane return self.device, self.flowcell, self.flowcell_lane def parse_illumina_fastq_header(s): return IlluminaFastqHeader(*s.split(':'))",
"logger = logging.getLogger('luigi-interface') class IlluminaFastqHeader: @classmethod def parse(cls, s): pieces = s.split(':') if",
"hasattr(out, 'remove') and out.exists(): try: out.remove() logger.info('Removed %s.', repr(out)) except: logger.exception('Failed to remove",
"def complete(self): return (not self.rerun or self._has_rerun) and super().complete() class CheckAfterCompleteMixin: \"\"\"Ensures that",
"self.flowcell_lane def parse_illumina_fastq_header(s): return IlluminaFastqHeader(*s.split(':')) def max_retry(count): \"\"\" Set the maximum number of",
"max_retry(count): \"\"\" Set the maximum number of time a task can be retried",
"a successful run().\"\"\" def run(self): ret = super().run() if not self.complete(): raise RuntimeError('{}",
"wrapper(cls): cls.retry_count = count return cls return wrapper no_retry = max_retry(0) class TaskWithPriorityMixin:",
"if not self.complete(): raise RuntimeError('{} is not completed after successful run().'.format(repr(self))) return ret",
"\"\"\" def wrapper(cls): cls.retry_count = count return cls return wrapper no_retry = max_retry(0)",
"ParameterVisibility logger = logging.getLogger('luigi-interface') class IlluminaFastqHeader: @classmethod def parse(cls, s): pieces = s.split(':')",
"logging.getLogger('luigi-interface') class IlluminaFastqHeader: @classmethod def parse(cls, s): pieces = s.split(':') if len(pieces) ==",
"tile self.x = x self.y = y @property def batch_factor(self): if self.flowcell is",
"== 7: return cls(*pieces) else: raise TypeError('Unsupported Illumina FASTQ header format {}.'.format(s)) def",
"number of time a task can be retried before being disabled as per",
"def __init__(self, *kwargs, **kwds): super().__init__(*kwargs, **kwds) self._has_rerun = False def run(self): try: return",
"tile=None, x=None, y=None): self.device = device self.run = run self.flowcell = flowcell self.flowcell_lane",
"regardless of its completion status. \"\"\" rerun = luigi.BoolParameter(default=False, positional=False, significant=False) def __init__(self,",
"run self.flowcell = flowcell self.flowcell_lane = flowcell_lane self.tile = tile self.x = x",
"= pieces return cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x, y=y) elif len(pieces) == 7: return",
"device, flowcell_lane, tile, x, y = pieces return cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x, y=y)",
"else: raise TypeError('Unsupported Illumina FASTQ header format {}.'.format(s)) def __init__(self, device, run=None, flowcell=None,",
"TypeError('Unsupported Illumina FASTQ header format {}.'.format(s)) def __init__(self, device, run=None, flowcell=None, flowcell_lane=None, tile=None,",
"given task.\"\"\" priority = luigi.IntParameter(default=0, positional=False, significant=False) class RerunnableTaskMixin: \"\"\" Mixin for a",
"@property def batch_factor(self): if self.flowcell is None: return self.device, self.flowcell_lane return self.device, self.flowcell,",
"\"\"\"Mixin that adds a --priority flag to a given task.\"\"\" priority = luigi.IntParameter(default=0,",
"is not completed after successful run().'.format(repr(self))) return ret def remove_task_output(task): logger.info('Cleaning up %s...',",
"before being disabled as per Luigi retry policy. \"\"\" def wrapper(cls): cls.retry_count =",
"no_retry = max_retry(0) class TaskWithPriorityMixin: \"\"\"Mixin that adds a --priority flag to a",
"a --priority flag to a given task.\"\"\" priority = luigi.IntParameter(default=0, positional=False, significant=False) class",
"per Luigi retry policy. \"\"\" def wrapper(cls): cls.retry_count = count return cls return",
"x, y = pieces return cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x, y=y) elif len(pieces) ==",
"batch_factor(self): if self.flowcell is None: return self.device, self.flowcell_lane return self.device, self.flowcell, self.flowcell_lane def",
"flowcell=None, flowcell_lane=None, tile=None, x=None, y=None): self.device = device self.run = run self.flowcell =",
"a task can be retried before being disabled as per Luigi retry policy.",
"import luigi from luigi.task import flatten_output from luigi.parameter import ParameterVisibility logger = logging.getLogger('luigi-interface')",
"logger.info('Cleaning up %s...', repr(task)) for out in flatten_output(task): if hasattr(out, 'remove') and out.exists():",
"self._has_rerun) and super().complete() class CheckAfterCompleteMixin: \"\"\"Ensures that a task is completed after a",
"def remove_task_output(task): logger.info('Cleaning up %s...', repr(task)) for out in flatten_output(task): if hasattr(out, 'remove')",
"elif len(pieces) == 7: return cls(*pieces) else: raise TypeError('Unsupported Illumina FASTQ header format",
"up %s...', repr(task)) for out in flatten_output(task): if hasattr(out, 'remove') and out.exists(): try:",
"IlluminaFastqHeader: @classmethod def parse(cls, s): pieces = s.split(':') if len(pieces) == 5: device,",
"after a successful run().\"\"\" def run(self): ret = super().run() if not self.complete(): raise",
"after successful run().'.format(repr(self))) return ret def remove_task_output(task): logger.info('Cleaning up %s...', repr(task)) for out",
"parse(cls, s): pieces = s.split(':') if len(pieces) == 5: device, flowcell_lane, tile, x,",
"logging import uuid import luigi from luigi.task import flatten_output from luigi.parameter import ParameterVisibility",
"device self.run = run self.flowcell = flowcell self.flowcell_lane = flowcell_lane self.tile = tile",
"flowcell_lane, tile, x, y = pieces return cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x, y=y) elif",
"x self.y = y @property def batch_factor(self): if self.flowcell is None: return self.device,",
"max_retry(0) class TaskWithPriorityMixin: \"\"\"Mixin that adds a --priority flag to a given task.\"\"\"",
"Mixin for a task that can be rerun regardless of its completion status.",
"can be retried before being disabled as per Luigi retry policy. \"\"\" def",
"completion status. \"\"\" rerun = luigi.BoolParameter(default=False, positional=False, significant=False) def __init__(self, *kwargs, **kwds): super().__init__(*kwargs,",
"status. \"\"\" rerun = luigi.BoolParameter(default=False, positional=False, significant=False) def __init__(self, *kwargs, **kwds): super().__init__(*kwargs, **kwds)",
"self.rerun or self._has_rerun) and super().complete() class CheckAfterCompleteMixin: \"\"\"Ensures that a task is completed",
"successful run().\"\"\" def run(self): ret = super().run() if not self.complete(): raise RuntimeError('{} is",
"= luigi.BoolParameter(default=False, positional=False, significant=False) def __init__(self, *kwargs, **kwds): super().__init__(*kwargs, **kwds) self._has_rerun = False",
"'remove') and out.exists(): try: out.remove() logger.info('Removed %s.', repr(out)) except: logger.exception('Failed to remove %s.',",
"retry policy. \"\"\" def wrapper(cls): cls.retry_count = count return cls return wrapper no_retry",
"self.flowcell, self.flowcell_lane def parse_illumina_fastq_header(s): return IlluminaFastqHeader(*s.split(':')) def max_retry(count): \"\"\" Set the maximum number",
"class IlluminaFastqHeader: @classmethod def parse(cls, s): pieces = s.split(':') if len(pieces) == 5:",
"IlluminaFastqHeader(*s.split(':')) def max_retry(count): \"\"\" Set the maximum number of time a task can",
"def wrapper(cls): cls.retry_count = count return cls return wrapper no_retry = max_retry(0) class",
"class TaskWithPriorityMixin: \"\"\"Mixin that adds a --priority flag to a given task.\"\"\" priority",
"True def complete(self): return (not self.rerun or self._has_rerun) and super().complete() class CheckAfterCompleteMixin: \"\"\"Ensures",
"return cls(*pieces) else: raise TypeError('Unsupported Illumina FASTQ header format {}.'.format(s)) def __init__(self, device,",
"that adds a --priority flag to a given task.\"\"\" priority = luigi.IntParameter(default=0, positional=False,",
"pieces return cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x, y=y) elif len(pieces) == 7: return cls(*pieces)",
"7: return cls(*pieces) else: raise TypeError('Unsupported Illumina FASTQ header format {}.'.format(s)) def __init__(self,",
"luigi from luigi.task import flatten_output from luigi.parameter import ParameterVisibility logger = logging.getLogger('luigi-interface') class",
"= super().run() if not self.complete(): raise RuntimeError('{} is not completed after successful run().'.format(repr(self)))",
"{}.'.format(s)) def __init__(self, device, run=None, flowcell=None, flowcell_lane=None, tile=None, x=None, y=None): self.device = device",
"\"\"\" rerun = luigi.BoolParameter(default=False, positional=False, significant=False) def __init__(self, *kwargs, **kwds): super().__init__(*kwargs, **kwds) self._has_rerun",
"positional=False, significant=False) class RerunnableTaskMixin: \"\"\" Mixin for a task that can be rerun",
"to a given task.\"\"\" priority = luigi.IntParameter(default=0, positional=False, significant=False) class RerunnableTaskMixin: \"\"\" Mixin",
"return cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x, y=y) elif len(pieces) == 7: return cls(*pieces) else:",
"be retried before being disabled as per Luigi retry policy. \"\"\" def wrapper(cls):",
"task that can be rerun regardless of its completion status. \"\"\" rerun =",
"\"\"\" Mixin for a task that can be rerun regardless of its completion",
"= False def run(self): try: return super().run() finally: self._has_rerun = True def complete(self):",
"RuntimeError('{} is not completed after successful run().'.format(repr(self))) return ret def remove_task_output(task): logger.info('Cleaning up",
"Luigi retry policy. \"\"\" def wrapper(cls): cls.retry_count = count return cls return wrapper",
"device, run=None, flowcell=None, flowcell_lane=None, tile=None, x=None, y=None): self.device = device self.run = run",
"super().__init__(*kwargs, **kwds) self._has_rerun = False def run(self): try: return super().run() finally: self._has_rerun =",
"len(pieces) == 7: return cls(*pieces) else: raise TypeError('Unsupported Illumina FASTQ header format {}.'.format(s))",
"def parse_illumina_fastq_header(s): return IlluminaFastqHeader(*s.split(':')) def max_retry(count): \"\"\" Set the maximum number of time",
"super().run() if not self.complete(): raise RuntimeError('{} is not completed after successful run().'.format(repr(self))) return",
"5: device, flowcell_lane, tile, x, y = pieces return cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x,",
"ret def remove_task_output(task): logger.info('Cleaning up %s...', repr(task)) for out in flatten_output(task): if hasattr(out,",
"positional=False, significant=False) def __init__(self, *kwargs, **kwds): super().__init__(*kwargs, **kwds) self._has_rerun = False def run(self):",
"uuid import luigi from luigi.task import flatten_output from luigi.parameter import ParameterVisibility logger =",
"return wrapper no_retry = max_retry(0) class TaskWithPriorityMixin: \"\"\"Mixin that adds a --priority flag",
"not completed after successful run().'.format(repr(self))) return ret def remove_task_output(task): logger.info('Cleaning up %s...', repr(task))",
"wrapper no_retry = max_retry(0) class TaskWithPriorityMixin: \"\"\"Mixin that adds a --priority flag to",
"flowcell self.flowcell_lane = flowcell_lane self.tile = tile self.x = x self.y = y",
"= flowcell_lane self.tile = tile self.x = x self.y = y @property def",
"x=x, y=y) elif len(pieces) == 7: return cls(*pieces) else: raise TypeError('Unsupported Illumina FASTQ",
"self.device, self.flowcell, self.flowcell_lane def parse_illumina_fastq_header(s): return IlluminaFastqHeader(*s.split(':')) def max_retry(count): \"\"\" Set the maximum",
"self.flowcell_lane = flowcell_lane self.tile = tile self.x = x self.y = y @property",
"FASTQ header format {}.'.format(s)) def __init__(self, device, run=None, flowcell=None, flowcell_lane=None, tile=None, x=None, y=None):",
"return ret def remove_task_output(task): logger.info('Cleaning up %s...', repr(task)) for out in flatten_output(task): if",
"False def run(self): try: return super().run() finally: self._has_rerun = True def complete(self): return",
"format {}.'.format(s)) def __init__(self, device, run=None, flowcell=None, flowcell_lane=None, tile=None, x=None, y=None): self.device =",
"its completion status. \"\"\" rerun = luigi.BoolParameter(default=False, positional=False, significant=False) def __init__(self, *kwargs, **kwds):",
"disabled as per Luigi retry policy. \"\"\" def wrapper(cls): cls.retry_count = count return",
"y=None): self.device = device self.run = run self.flowcell = flowcell self.flowcell_lane = flowcell_lane",
"a task that can be rerun regardless of its completion status. \"\"\" rerun",
"parse_illumina_fastq_header(s): return IlluminaFastqHeader(*s.split(':')) def max_retry(count): \"\"\" Set the maximum number of time a",
"= True def complete(self): return (not self.rerun or self._has_rerun) and super().complete() class CheckAfterCompleteMixin:",
"as per Luigi retry policy. \"\"\" def wrapper(cls): cls.retry_count = count return cls",
"rerun = luigi.BoolParameter(default=False, positional=False, significant=False) def __init__(self, *kwargs, **kwds): super().__init__(*kwargs, **kwds) self._has_rerun =",
"luigi.parameter import ParameterVisibility logger = logging.getLogger('luigi-interface') class IlluminaFastqHeader: @classmethod def parse(cls, s): pieces",
"that a task is completed after a successful run().\"\"\" def run(self): ret =",
"import flatten_output from luigi.parameter import ParameterVisibility logger = logging.getLogger('luigi-interface') class IlluminaFastqHeader: @classmethod def",
"self.device = device self.run = run self.flowcell = flowcell self.flowcell_lane = flowcell_lane self.tile",
"return self.device, self.flowcell_lane return self.device, self.flowcell, self.flowcell_lane def parse_illumina_fastq_header(s): return IlluminaFastqHeader(*s.split(':')) def max_retry(count):",
"\"\"\" Set the maximum number of time a task can be retried before",
"self.y = y @property def batch_factor(self): if self.flowcell is None: return self.device, self.flowcell_lane",
"be rerun regardless of its completion status. \"\"\" rerun = luigi.BoolParameter(default=False, positional=False, significant=False)",
"= max_retry(0) class TaskWithPriorityMixin: \"\"\"Mixin that adds a --priority flag to a given",
"= x self.y = y @property def batch_factor(self): if self.flowcell is None: return",
"RerunnableTaskMixin: \"\"\" Mixin for a task that can be rerun regardless of its",
"flag to a given task.\"\"\" priority = luigi.IntParameter(default=0, positional=False, significant=False) class RerunnableTaskMixin: \"\"\"",
"= y @property def batch_factor(self): if self.flowcell is None: return self.device, self.flowcell_lane return",
"= flowcell self.flowcell_lane = flowcell_lane self.tile = tile self.x = x self.y =",
"self.flowcell is None: return self.device, self.flowcell_lane return self.device, self.flowcell, self.flowcell_lane def parse_illumina_fastq_header(s): return",
"__init__(self, device, run=None, flowcell=None, flowcell_lane=None, tile=None, x=None, y=None): self.device = device self.run =",
"a given task.\"\"\" priority = luigi.IntParameter(default=0, positional=False, significant=False) class RerunnableTaskMixin: \"\"\" Mixin for",
"that can be rerun regardless of its completion status. \"\"\" rerun = luigi.BoolParameter(default=False,",
"of its completion status. \"\"\" rerun = luigi.BoolParameter(default=False, positional=False, significant=False) def __init__(self, *kwargs,",
"of time a task can be retried before being disabled as per Luigi",
"the maximum number of time a task can be retried before being disabled",
"task is completed after a successful run().\"\"\" def run(self): ret = super().run() if",
"adds a --priority flag to a given task.\"\"\" priority = luigi.IntParameter(default=0, positional=False, significant=False)",
"and out.exists(): try: out.remove() logger.info('Removed %s.', repr(out)) except: logger.exception('Failed to remove %s.', repr(out))",
"luigi.BoolParameter(default=False, positional=False, significant=False) def __init__(self, *kwargs, **kwds): super().__init__(*kwargs, **kwds) self._has_rerun = False def",
"super().complete() class CheckAfterCompleteMixin: \"\"\"Ensures that a task is completed after a successful run().\"\"\"",
"luigi.task import flatten_output from luigi.parameter import ParameterVisibility logger = logging.getLogger('luigi-interface') class IlluminaFastqHeader: @classmethod",
"def batch_factor(self): if self.flowcell is None: return self.device, self.flowcell_lane return self.device, self.flowcell, self.flowcell_lane",
"return IlluminaFastqHeader(*s.split(':')) def max_retry(count): \"\"\" Set the maximum number of time a task"
] |
[
"(Gaia_DR2.l > 180)) | ((Gaia_DR2.b >> None) & (Gaia_DR2.l >> None)))) if query_region:",
"gaia_dr2_source g on g.source_id = tic.gaia_int join catalog_to_tic_v8 ct on ct.target_id = tic.id",
"on RP instead of H \"\"\" name = 'mwm_yso_ob_boss' category = 'science' instrument",
"have ~52.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, gaia Additional",
"TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc",
"(AllWise.w2mpro - AllWise.w3mpro) > 0.50, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, Gaia_DR2.parallax > 0.3))",
"targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29 |",
"TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(TwoMassPSC,",
"output[i][1] if(current_rp < 14.76): current_instrument = 'BOSS' current_cadence = 'bright_3x1' elif(current_rp < 15.075):",
"have ~45.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20 clustered catalog",
"== TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise,",
"+ \"'\" \" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_PMS_APOGEE_Carton(BaseCarton):",
"15.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro) > 0.50, (AllWise.w3mpro -",
"Central Molecular Zone APOGEE. Shorthand name: mwm_yso_cmz_apogee old class name: MWM_YSO_CMZ_Carton old shorthand",
"+ \" sandbox.temp_mwm_yso_ob_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid =",
"if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and phot_g_mean_mag",
"not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set instrument = '\"",
"+ \" set cadence = '\" + current_cadence + \"'\" \" where catalogid",
"zari18pms) & h<13 lead contact:<NAME> \"\"\" # peewee Model name ---> postgres table",
"name = 'mwm_yso_disk_boss' category = 'science' instrument = None # instrument is set",
"position on the sky: Removed below condition. l is glon (galactic longitude) b",
"# all the targets should be within 5 deg of the plane+ #",
"358) | (MIPSGAL.glon < 2), (MIPSGAL.glat > -1) & (MIPSGAL.glat < 1), \"\"\"",
"version_id, CatalogToTIC_v8.best >> True, MIPSGAL.hmag < 13, (MIPSGAL.mag_8_0 - MIPSGAL.mag_24) > 2.5, (Gaia_DR2.parallax",
"TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1'",
"None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set instrument = '\" +",
"0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > 0.02)) if query_region:",
"\"'\" \" where catalogid = \" + str(current_catalogid) + \";\") if current_cadence is",
"of the plane+ few sources that can be located further south of the",
"and it's a costly join. def build_query(self, version_id, query_region=None): query = (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,",
"Gaia_DR2.parallax. if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2])))",
"old class name: MWM_YSO_S2_Carton old shorthand name: mwm_yso_s2 Simplified Description of selection criteria:",
"CatalogToTIC_v8.best >> True, MIPSGAL.hmag < 13, (MIPSGAL.mag_8_0 - MIPSGAL.mag_24) > 2.5, (Gaia_DR2.parallax <",
"of H \"\"\" name = 'mwm_yso_cluster_boss' category = 'science' instrument = None #",
"= 'mwm_yso' mapper = 'MWM' priority = 2700 def build_query(self, version_id, query_region=None): #",
"in mwm_yso_cluster_boss ' + 'post_process(): ' + 'instrument = None, cadence= None') if",
"old shorthand name: mwm_yso_s1 Simplified Description of selection criteria: selection of YSOs based",
"Shorthand name: mwm_yso_cluster_apogee old class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified Description",
"< 15.5): current_instrument = 'BOSS' current_cadence = 'bright_6x1' else: # All cases should",
"more than one): Pseudo SQL (optional): Implementation: h_m<13 and (w2mpro-w3mpro>4 and w4mpro is",
"degree in b from the galactic center, (MIPSGAL.glon > 358) | (MIPSGAL.glon <",
"== TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (((AllWise.w2mpro -",
"(AllWise.w4mpro >> None) & ((AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.1)), ((Gaia_DR2.b > -5) &",
"parallax is null) For CMZ, the raw sql query would be: select ct.catalogid",
"plane if l>180 (should have ~1.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs",
"(Gaia_DR2.phot_g_mean_mag > 18.5) | (Gaia_DR2.phot_g_mean_mag >> None), (AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.0, (AllWise.h_m_2mass",
"> peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error * 0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /",
"version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, YSO_Clustering.age < 7.5)) if query_region: query",
"misses half the sources. Selection was done on the allwise catalog that had",
"CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, YSO_Clustering.age < 7.5)) if query_region: query =",
"on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (AllWise.w1mpro",
"In words: # all the targets should be within 5 deg of the",
"current_cadence = None raise TargetSelectionError('error in mwm_yso_disk_boss ' + 'post_process(): ' + 'instrument",
"from \" + \" sandbox.temp_mwm_yso_cluster_boss ;\") output = cursor.fetchall() for i in range(len(output)):",
"TwoMassPSC.designation)) .join(TIC_v8, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id == TIC_v8.gaia_int)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id",
"18.5) | (Gaia_DR2.phot_g_mean_mag >> None), (AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.0, (AllWise.h_m_2mass - AllWise.k_m_2mass)",
"Return columns: 2mass id, allwise id, J, H, K, W1, W2, W3, W4",
"== AllWise.designation)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8)",
"SQL (optional): Implementation: Hmag<13 and _8_0_-_24_>2.5 and (parallax<0.2 or parallax is null) For",
"= 'MWM' priority = 2700 def build_query(self, version_id, query_region=None): # join with Sagitta",
"| (Gaia_DR2.parallax >> None))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec,",
"sandbox.temp_mwm_yso_cluster_boss \" + \" set cadence = '\" + current_cadence + \"'\" \"",
"Gaia_DR2.phot_rp_mean_mag < 15.5, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp",
"of the plane if l>180 (should have ~1.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional",
"Hence, in the above query, we cannot use TIC_v8.plx instead # of Gaia_DR2.parallax.",
"subset of TIC_v8 # Gaia_DR2 is a subset of TIC_v8 # # 2MASS",
"+ \" sandbox.temp_mwm_yso_variable_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid =",
"| is for peewee SQL union query = query1 | query2 if query_region:",
"is the absolute mag (should have ~52.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source",
"is a subset of 2MASS # mipsgal can be joined to twomass_psc via",
"# Hence: # ((b>-5) and (b<5)) or ((b<-5) and (l > 180)) #",
"* (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp > 1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error >",
"and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments: Split from mwm_yso_s3 to request BOSS observations, RP",
"update sandbox.temp_mwm_yso_ob_boss \" + \" set cadence = '\" + current_cadence + \"'\"",
"if RP<15.5 Pseudo SQL (optional): Implementation: rp<15.5 and bp_rp between -0.2 and 1.1",
"limit for carton based on RP instead of H \"\"\" name = 'mwm_yso_disk_boss'",
"> 358) | (MIPSGAL.glon < 2), (MIPSGAL.glat > -1) & (MIPSGAL.glat < 1),",
"-0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000",
"\"\"\" YSOs - Pre-main sequence, BOSS Shorthand name: mwm_yso_pms_boss Comments: New, Split from",
"< 13)) # | is for peewee SQL union query = query1 |",
"where() as S2 query. def build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,",
"receive more than one): boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075 | boss_bright_5x1",
"+ \"'\" \" where catalogid = \" + str(current_catalogid) + \";\") if current_cadence",
"Catalog, CatalogToTIC_v8, Gaia_DR2, Sagitta, TIC_v8, TwoMassPSC, YSO_Clustering, Zari18pms) from target_selection.cartons import BaseCarton from",
"Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_boss' category",
"query1 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m,",
".select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j, YSO_Clustering.h, YSO_Clustering.k, Gaia_DR2.parallax) .join(TIC_v8,",
"Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id ==",
"\"\"\" name = 'mwm_yso_variable_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1'",
"H, K, W1, W2, W3, W4 cadence options for these targets (list all",
"ct.catalogid from mipsgal m join twomass_psc t on twomass_name = designation join tic_v8",
"RP<15.5 Pseudo SQL (optional): Implementation: age<7.5 and rp<15.5 Comments: Split from Cluster to",
"carton based on RP instead of H \"\"\" name = 'mwm_yso_disk_boss' category =",
"magnitude check added to the previous selection \"\"\" name = 'mwm_yso_variable_boss' category =",
"brighter than H<13 mag. (should have ~45.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source",
"~11.6K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass+allwise, gaia (allow sources",
"and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02",
"the catalog of clustered structures, with age<7.5 dex and brighter than rp<15.5 mag.",
"SQL (optional): Implementation: phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 Comments:",
"(MIPSGAL.glat < 1), \"\"\" name = 'mwm_yso_cmz_apogee' category = 'science' instrument = 'APOGEE'",
"Above implementation has below clause # and (b>-5 or l>180) and b<-5 #",
"= 2700 def build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'),",
".join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int ==",
"log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\" name =",
"catalog of clustered structures, with age<7.5 dex and brighter than rp<15.5 mag. Wiki",
"2MASS, and all 2MASS have an entry in TIC, but not all the",
"version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp <",
"options, even though no single target will receive more than one): boss_bright_3x1 if",
"-5) & (Gaia_DR2.l > 180)) | ((Gaia_DR2.b >> None) & (Gaia_DR2.l >> None))))",
"and parallax >0.3 and bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>",
"W2-W3>1, W3-W4>1.5, and relates (W3-W4)>(W1-W2)*0.5+1.1 (should have ~11.6K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional",
"TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int",
"and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments: Split from mwm_yso_s3 to request BOSS",
"Shorthand name: mwm_yso_cmz_apogee old class name: MWM_YSO_CMZ_Carton old shorthand name: mwm_yso_cmz Simplified Description",
"name # Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name =",
"< phot_g_mean_mag-5*(log10(1000/parallax)-1) and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error<",
"and parallax>0.3 Comments: Split from mwm_yso_s1 to request BOSS observations, same color selection",
"no single target will receive more than one): boss_bright_3x1 if RP<14.76 | boss_bright_4x1",
"current_cadence = 'bright_4x1' elif(current_rp < 15.29): current_instrument = 'BOSS' current_cadence = 'bright_5x1' elif(current_rp",
".join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5))",
"Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))",
"and h_m-ks_m>0.5 and w1mpro-w2mpro>0.5 and w2mpro-w3mpro>1 and w3mpro-w4mpro>1.5 and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\" name =",
"us anything extra and it's a costly join. def build_query(self, version_id, query_region=None): query",
"query_region[1], query_region[2]))) return query class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Nebula APOGEE(optically invisible, WISE saturated).",
"a Gaia counterpart). Comments: Formerly mwm_yso_cmz, removed check on the position on the",
"SQL (optional): Implementation: age<7.5 and rp<15.5 Comments: Split from Cluster to request BOSS",
"<NAME> (<EMAIL>) # @Date: 2020-06-10 # @Filename: mwm_yso.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)",
"= 'mwm_yso' mapper = 'MWM' priority = 2700 # mipsgal is a subset",
"was crossmatched against against Gaia with 1\" search radius. Return columns: Gaia id,",
"for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if",
"0.2 or g.parallax is null) and ct.version_id = 13 and ct.best is true;",
"(((AllWise.w2mpro - AllWise.w3mpro) > 4) & (AllWise.w4mpro >> None)) | ((AllWise.w3mpro >> None)",
"the above query, we cannot use TIC_v8.plx instead # of Gaia_DR2.parallax. if query_region:",
"* 0.8 + 1.1)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec,",
"None current_cadence = None raise TargetSelectionError('error in mwm_yso_variable_boss ' + 'post_process(): ' +",
"Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs - Variable",
"G>15 or without gaia detection, colors J-H>0,5, W1-W2>0.5, W2-W3>1, W3-W4>1.5, and relates (W3-W4)>(W1-W2)*0.5+1.1",
"'instrument = None, cadence= None') if current_instrument is not None: self.database.execute_sql( \" update",
"APOGEE (IR excess). Shorthand name: mwm_yso_disk_apogee old class name: MWM_YSO_S1_Carton old shorthand name:",
"Implementation: (in sagitta | in zari18pms) & rp<15.5 lead contact:<NAME> \"\"\" # peewee",
"update sandbox.temp_mwm_yso_pms_boss \" + \" set cadence = '\" + current_cadence + \"'\"",
"TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8)",
"of TIC_v8 # Gaia_DR2 is a subset of TIC_v8 # # 2MASS is",
"CatalogToTIC_v8, Gaia_DR2, Sagitta, TIC_v8, TwoMassPSC, YSO_Clustering, Zari18pms) from target_selection.cartons import BaseCarton from target_selection.exceptions",
"plane+ # few sources that can be # located further south of the",
"and faint limit for carton based on RP instead of H \"\"\" name",
"in post_process() program = 'mwm_yso' mapper = 'MWM' priority = 2700 # yso_clustering",
"old class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified Description of selection criteria:",
"# All values of TIC_v8.plx (for non-null entries) are not the same as",
"query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Variable APOGEE (pre-main sequence",
"sequence, brighter than H<13, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2",
"== Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id",
"'mwm_yso_disk_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso'",
"more than one): Pseudo SQL (optional): Implementation: phot_g_mean_mag < 18.5 and h_m <13",
"+ str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss",
"needed: mipsgal Additional cross-matching needed: the table has xmatch included Return columns: mipsgal",
"* 0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) /",
"RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25 and",
"sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\" name = 'mwm_yso_variable_apogee' category = 'science' instrument = 'APOGEE'",
"AllWise.h_m_2mass) > 1.0, (AllWise.h_m_2mass - AllWise.k_m_2mass) > 0.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.50,",
"as below based on the text. # In words: # all the targets",
"- Embedded APOGEE (optically invisible). Shorthand name: mwm_yso_embedded_apogee old class name: MWM_YSO_S2_Carton old",
"h_m-ks_m>0.5 and w1mpro-w2mpro>0.5 and w2mpro-w3mpro>1 and w3mpro-w4mpro>1.5 and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\" name = 'mwm_yso_embedded_apogee'",
"None, cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \"",
"get here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_variable_boss '",
"than one): cadence options for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if",
"is null) or (w3mpro is null and w4mpro is null and j_m-h_m>1.1) and",
"w4mpro is null and j_m-h_m>1.1) and (b>-5 or l>180) and b<-5 \"\"\" name",
"# # Due to below, we do not need a between to Catalog",
"a subset of 2MASS # # table catalogdb.mipsgal # Foreign-key constraints: # \"twomass_name_fk\"",
"on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, Gaia_DR2.phot_g_mean_mag",
"Gaia_DR2.parallax) - 1) < 1.6 * Gaia_DR2.bp_rp - 2.2, Gaia_DR2.parallax > 0.3)) if",
"0.25, (AllWise.w2mpro - AllWise.w3mpro) > 0.50, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, Gaia_DR2.parallax >",
"statements below are equivalent. (l> 358 or l< 2) and b between -1",
"> 0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > 0.02)) if query_region: query = (query .join_from(CatalogToTIC_v8,",
"name = 'mwm_yso_cluster_boss' category = 'science' instrument = None # instrument is set",
">> None)) | ((AllWise.w3mpro >> None) & (AllWise.w4mpro >> None) & ((AllWise.j_m_2mass -",
"and (b<5)) or ((b<-5) and (l > 180)) # l, b in Gaia_DR2",
"mwm_yso_cluster_boss old class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified Description of selection",
"# mipsgal.twomass_name = TwoMassPSC.designation. # Then join via TIC and catalog_to_tic. # #",
"be joined to gaia_dr2_source via source_id. # # table catalogdb.yso_clustering # Foreign-key constraints:",
"(AllWise.w4mpro >> None)) | ((AllWise.w3mpro >> None) & (AllWise.w4mpro >> None) & ((AllWise.j_m_2mass",
"on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id",
"name: mwm_yso_cmz Simplified Description of selection criteria: selection of sources in the central",
"get here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_pms_boss '",
"from \" + \" sandbox.temp_mwm_yso_ob_boss ;\") output = cursor.fetchall() for i in range(len(output)):",
"an entry in TIC, but not all the TIC entries have a Gaia",
"tic_v8 tic on tic.twomass_psc = t.designation left outer join gaia_dr2_source g on g.source_id",
"= 'mwm_yso_embedded_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program =",
"all the targets should be within 5 deg of the plane+ # few",
"(blank) W4 with W2-W3>4, or saturated W3 and W2, with J-H>1.1. Some contaminants",
"sources that can be located further south of the plane if l>180 (should",
"> 4) & (AllWise.w4mpro >> None)) | ((AllWise.w3mpro >> None) & (AllWise.w4mpro >>",
"query1 | query2 if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0],",
"source catalogs needed: 2mass, allwise Additional cross-matching needed: Return columns: 2mass id, allwise",
"= (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Disk_BOSS_Carton(BaseCarton):",
"| boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo",
"> 1.50, (AllWise.w3mpro - AllWise.w4mpro) > (AllWise.w1mpro - AllWise.w2mpro) * 0.8 + 1.1))",
"'mwm_yso_cmz_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso'",
"'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority = 2700 # mipsgal is",
"Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5, MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24, MIPSGAL.hmag,",
"358 or m.glon < 2) and (m.glat > -1 and m.glat < 1)",
"current_cadence = 'bright_5x1' elif(current_rp < 15.5): current_instrument = 'BOSS' current_cadence = 'bright_6x1' else:",
"H, K, parallax cadence options for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1",
"class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Embedded APOGEE (optically invisible). Shorthand name: mwm_yso_embedded_apogee old class",
"version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag,",
"left outer join gaia_dr2_source g on g.source_id = tic.gaia_int join catalog_to_tic_v8 ct on",
"and ct.best is true; Note you only need one left outer join between",
"(for 7 < H < 13) Implementation: (in sagitta | in zari18pms) &",
"current_cadence = None raise TargetSelectionError('error in mwm_yso_ob_boss ' + 'post_process(): ' + 'instrument",
"Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error >",
"l>180) and b<-5 as below based on the text. # In words: #",
"APOGEE Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_apogee old class name: MWM_YSO_OB_Carton old shorthand",
"on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, YSO_Clustering.age",
"\";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \"",
"query class MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs - OB BOSS Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_boss",
"have ~11.6K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass+allwise, gaia (allow",
"| in zari18pms) & h<13 lead contact:<NAME> \"\"\" # peewee Model name --->",
"\" + \" set cadence = '\" + current_cadence + \"'\" \" where",
"15.5)) # join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key,",
".switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag <",
"2700 def build_query(self, version_id, query_region=None): # join with Sagitta query1 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid,",
"mipsgal id, 2mass id, j, h, k, 3.6, 4.8, 8.0, 24 mag cadence",
"1.50, (AllWise.w3mpro - AllWise.w4mpro) > (AllWise.w1mpro - AllWise.w2mpro) * 0.8 + 1.1)) if",
"(MIPSGAL, AllWise, Catalog, CatalogToTIC_v8, Gaia_DR2, Sagitta, TIC_v8, TwoMassPSC, YSO_Clustering, Zari18pms) from target_selection.cartons import",
"twomass_psc(designation) # # Due to below, we do not need a between to",
"it's a costly join. def build_query(self, version_id, query_region=None): query = (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'),",
"Implementation: age<7.5 and h<13 \"\"\" name = 'mwm_yso_cluster_apogee' category = 'science' instrument =",
"set instrument = '\" + current_instrument + \"'\" \" where catalogid = \"",
"is a subset of gaia and # can be joined to gaia_dr2_source via",
"as # values of Gaia_DR2.parallax. # Hence, in the above query, we cannot",
"has xmatch included Return columns: mipsgal id, 2mass id, j, h, k, 3.6,",
"b from the galactic center, (MIPSGAL.glon > 358) | (MIPSGAL.glon < 2), (MIPSGAL.glat",
"# Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_boss' category = 'science' instrument",
"SQL union query = query1 | query2 if query_region: query = (query .join_from(CatalogToTIC_v8,",
"SQL (optional): Implementation: h_m<13 and (w2mpro-w3mpro>4 and w4mpro is null) or (w3mpro is",
"H<13 mag. (should have ~45.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed:",
"RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_cluster_boss",
"equivalent. (l> 358 or l< 2) and b between -1 and 1 (m.glon",
"Gaia_DR2.phot_rp_mean_mag < 15.5)) # join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'),",
"single target will receive more than one): boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if",
"24 mag cadence options for these targets (list all options, even though no",
"is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set cadence =",
"\"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_variable_boss ;\") output = cursor.fetchall() for",
"Pre-main sequence, BOSS Shorthand name: mwm_yso_pms_boss Comments: New, Split from PMS Simplified Description",
"\" sandbox.temp_mwm_yso_disk_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0]",
"RP, J, H, K, parallax cadence options for these targets (list all options,",
"# Catalog.catalogid == CatalogToTIC_v8.catalogid # We can remove the join with Catalog in",
"TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5, MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24, MIPSGAL.hmag, Gaia_DR2.parallax, MIPSGAL.glon, MIPSGAL.glat) .join(TwoMassPSC,",
">> None) & (Gaia_DR2.l >> None)))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog)",
"BOSS (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_boss old class name: MWM_YSO_S3_Carton old",
"l is glon (galactic longitude) b is glat (galactic latitude) All four statements",
"if RP<14.76 | boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if",
"(query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs",
"1 < Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /",
"no single target will receive more than one): Pseudo SQL (optional): Implementation: age<7.5",
"18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp * 2.5 + 2.5 >",
"sources. Selection was done on the allwise catalog that had 2mass photometry, and",
"mag. Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20 clustered catalog Additional cross-matching",
"for peewee SQL union query = query1 | query2 if query_region: query =",
"\" update sandbox.temp_mwm_yso_pms_boss \" + \" set instrument = '\" + current_instrument +",
"query_region[1], query_region[2]))) return query class MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Variable APOGEE (pre-main sequence optical",
"> 2.5, (Gaia_DR2.parallax < 0.2) | (Gaia_DR2.parallax >> None))) if query_region: query =",
"on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) #",
"(Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax)",
"= \" + str(current_catalogid) + \";\") class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Embedded APOGEE (optically",
"= 'mwm_yso_ob_boss' category = 'science' instrument = None # instrument is set in",
"PMS Simplified Description of selection criteria: Selecting the clustered sources from the catalog",
"invisible, WISE saturated). Shorthand name: mwm_yso_nebula_apogee old class name: MWM_YSO_S2_5_Carton old shorthand name:",
"l< 2) and b between -1 and 1 (m.glon > 358 or m.glon",
"class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Cluster APOGEE Catalog Shorthand name: mwm_yso_cluster_apogee old class name:",
"YSO_Clustering.age < 7.5)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0],",
"age<7.5 and rp<15.5 Comments: Split from Cluster to request BOSS observations, assigning cadence",
"selection criteria: selection of YSOs, brighter than H<15, saturated (blank) W4 with W2-W3>4,",
"have a Gaia counterpart). Comments: Formerly mwm_yso_cmz, removed check on the position on",
"-0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should have ~8.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs",
"'mwm_yso_disk_boss' category = 'science' instrument = None # instrument is set in post_process()",
"YSOs, brighter than H<13, fainter than G>15 or without gaia detection, colors J-H>0,5,",
"closer than parallax>0.3, and brighter than H<13 (should have ~21.5K sources) Wiki page:",
"< 15.5, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp *",
".join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id ==",
"def post_process(self, model): \"\"\" cadence options for these targets: boss_bright_3x1 if RP<14.76 |",
"of selection criteria: Selecting the clustered sources from the catalog of vetted pre-main",
"using the values from Gaia since # TIC propagates the coordinates back to",
"peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > 0.02)) if query_region: query",
"for carton based on RP instead of H \"\"\" name = 'mwm_yso_ob_boss' category",
"sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and",
"a counterpart in 2MASS, and all 2MASS have an entry in TIC, but",
"color selection but assigning cadence and faint limit for carton based on RP",
"not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set instrument = '\"",
"h_m<13 and (w2mpro-w3mpro>4 and w4mpro is null) or (w3mpro is null and w4mpro",
"boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and phot_g_mean_mag < 18.5 and",
"is null) For CMZ, the raw sql query would be: select ct.catalogid from",
"true; Note you only need one left outer join between TIC and Gaia",
"criteria: Selecting the OB stars at the tip of the main sequence, brighter",
"Catalog Shorthand name: mwm_yso_cluster_boss old class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified",
"# Replace (b>-5 or l>180) and b<-5 as below based on the text.",
"Additional cross-matching needed: Note: Using the Gaia xmatch somehow misses half the sources.",
"old shorthand name: mwm_yso_s2 Simplified Description of selection criteria: selection of YSOs, brighter",
"(BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G, requiring variability in g,bp,rp>0.02 (with var_x defined as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have relations",
"xmatch. (should have ~3.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: mipsgal",
"TwoMassPSC.h_m, TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5, MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24, MIPSGAL.hmag, Gaia_DR2.parallax, MIPSGAL.glon, MIPSGAL.glat) .join(TwoMassPSC, on=(MIPSGAL.twomass_name",
"Gaia_DR2.bp_rp * 2.5 - 1 < Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax)",
"null and w4mpro is null and j_m-h_m>1.1) and (b>-5 or l>180) and b<-5",
"gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_ob_boss ;\") output = cursor.fetchall() for i in",
"propagates the coordinates back to epoch 2000.0 # (b>-5 or l>180) and b<-5",
"Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (Gaia_DR2.bp_rp > -0.2)",
"is glat (galactic latitude) All four statements below are equivalent. (l> 358 or",
"to twomass_psc via # mipsgal.twomass_name = TwoMassPSC.designation. # Then join via TIC and",
"Simplified Description of selection criteria: Selecting the clustered sources from the catalog of",
"class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified Description of selection criteria: selection",
"Shorthand name: mwm_yso_ob_apogee old class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified Description",
"from \" + \" sandbox.temp_mwm_yso_pms_boss ;\") output = cursor.fetchall() for i in range(len(output)):",
"needed: Gaia, 2mass, allwise Additional cross-matching needed: Note: Using the Gaia xmatch somehow",
"and bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and",
"raise TargetSelectionError('error in mwm_yso_ob_boss ' + 'post_process(): ' + 'instrument = None, cadence=",
"category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso' mapper",
"phot_g_mean_mag is null) and j_m-h_m>1 and h_m-ks_m>0.5 and w1mpro-w2mpro>0.5 and w2mpro-w3mpro>1 and w3mpro-w4mpro>1.5",
"(AllWise.w3mpro - AllWise.w4mpro) > (AllWise.w1mpro - AllWise.w2mpro) * 0.8 + 1.1)) if query_region:",
"j_m-h_m>1 and h_m-ks_m>0.5 and w1mpro-w2mpro>0.5 and w2mpro-w3mpro>1 and w3mpro-w4mpro>1.5 and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\" name",
"columns: 2mass id, allwise id, J, H, K, W1, W2, W3, W4 cadence",
"(query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs",
"query_region[2]))) return query class MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs - Variable BOSS (pre-main sequence optical variables).",
"id, J, H, K, W1, W2, W3, W4 cadence options for these targets",
"5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1) < 1.6 * Gaia_DR2.bp_rp - 2.2,",
"self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set instrument = '\" + current_instrument",
"Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Cluster APOGEE Catalog",
"Split from mwm_yso_s3 to request BOSS observations, RP magnitude check added to the",
"on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >>",
"from mwm_yso_s3 to request BOSS observations, RP magnitude check added to the previous",
"return query class MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs - Disk BOSS (IR excess). Shorthand name: mwm_yso_disk_boss",
"1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 \"\"\" name = 'mwm_yso_ob_apogee'",
"'mwm_yso_cluster_boss' category = 'science' instrument = None # instrument is set in post_process()",
"from target_selection.exceptions import TargetSelectionError # See catalog.py for the name of peewee model",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m",
"the Gaia xmatch somehow misses half the sources. Selection was done on the",
"allwise id, J, H, K, W1, W2, W3, W4 cadence options for these",
"to epoch 2000.0 # (b>-5 or l>180) and b<-5 # S2_5 query below",
"Gaia_DR2.parallax > 0.3)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0],",
"TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m",
"Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax)",
"not a subset of 2MASS # # table catalogdb.mipsgal # Foreign-key constraints: #",
"is set in post_process() program = 'mwm_yso' mapper = 'MWM' priority = 2700",
"Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp * 2.5 - 1 < Gaia_DR2.phot_g_mean_mag - 5 *",
"though no single target will receive more than one): apogee_bright_3x1 (for 7 <",
"((Gaia_DR2.b > -5) & (Gaia_DR2.b < 5)) | ((Gaia_DR2.b < -5) & (Gaia_DR2.l",
"+ str(current_catalogid) + \";\") class MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs - OB APOGEE Upper (pre-)Main Sequence.",
"in post_process() program = 'mwm_yso' mapper = 'MWM' priority = 2700 def build_query(self,",
"the galactic center, (MIPSGAL.glon > 358) | (MIPSGAL.glon < 2), (MIPSGAL.glat > -1)",
"- AllWise.h_m_2mass) > 1.0, (AllWise.h_m_2mass - AllWise.k_m_2mass) > 0.5, (AllWise.w1mpro - AllWise.w2mpro) >",
"across different version_id) # so the join with Catalog doesn't give us anything",
"query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,",
"cases should be covered above so we should not get here. current_instrument =",
"Gaia xmatch. (should have ~3.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed:",
"# Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_boss' category = 'science' instrument = None",
"mwm_yso_embedded_apogee old class name: MWM_YSO_S2_Carton old shorthand name: mwm_yso_s2 Simplified Description of selection",
"Hence: # ((b>-5) and (b<5)) or ((b<-5) and (l > 180)) # l,",
"- Central Molecular Zone APOGEE. Shorthand name: mwm_yso_cmz_apogee old class name: MWM_YSO_CMZ_Carton old",
"the sources. Selection was done on the allwise catalog that had 2mass photometry,",
"= None current_cadence = None raise TargetSelectionError('error in mwm_yso_cluster_boss ' + 'post_process(): '",
"on the text. # In words: # all the targets should be within",
"1.00, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, (AllWise.w3mpro - AllWise.w4mpro) > (AllWise.w1mpro - AllWise.w2mpro)",
"and (m.glat > -1 and m.glat < 1) and Sources are within 2",
"CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (((AllWise.w2mpro - AllWise.w3mpro) > 4) & (AllWise.w4mpro",
"358 or l< 2) and b between -1 and 1 (m.glon > 358",
"< 13, YSO_Clustering.age < 7.5)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra,",
"'post_process(): ' + 'instrument = None, cadence= None') if current_instrument is not None:",
"page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass+allwise, gaia (allow sources that lack gaia",
"== version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # | is for peewee",
"join catalog_to_tic_v8 ct on ct.target_id = tic.id where m.hmag < 13 and (m.mag_8_0",
"YSOs - Pre-main sequence, APOGEE Shorthand name: mwm_yso_pms_apogee Comments: New Simplified Description of",
"= 2700 # Above implementation has below clause # and (b>-5 or l>180)",
"be covered above so we should not get here. current_instrument = None current_cadence",
"Hmag<13 and _8_0_-_24_>2.5 and (parallax<0.2 or parallax is null) For CMZ, the raw",
"https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20 clustered catalog Additional cross-matching needed: Return columns:",
"h_m<13 and (phot_g_mean_mag>18.5 or phot_g_mean_mag is null) and j_m-h_m>1 and h_m-ks_m>0.5 and w1mpro-w2mpro>0.5",
"variability of var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95, and log10(var_bp)*5+11<M_BP, in which M_x is the absolute mag",
"None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \"",
"\"\"\"YSOs - Cluster BOSS Catalog Shorthand name: mwm_yso_cluster_boss old class name: MWM_YSO_Cluster_Carton old",
".where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Variable APOGEE",
"self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set cadence = '\" + current_cadence",
"to gaia_dr2_source via source_id. # # table catalogdb.yso_clustering # Foreign-key constraints: # \"yso_clustering_source_id_fkey\"",
"phot_g_mean_mag-5*(log10(1000/parallax)-1) and bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error<",
"mag. (should have ~45.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20",
"but not all the TIC entries have a Gaia counterpart). Comments: Formerly mwm_yso_cmz,",
"AllWise.designation)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(CatalogToTIC_v8,",
"options, even though no single target will receive more than one): Pseudo SQL",
".where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs - Disk BOSS",
"one left outer join between TIC and Gaia (all MIPSGAL targets have a",
"constraints: # \"yso_clustering_source_id_fkey\" FOREIGN KEY (source_id) # REFERENCES gaia_dr2_source(source_id) def build_query(self, version_id, query_region=None):",
"Gaia_DR2.phot_bp_mean_flux_over_error) * 5 + 11 < Gaia_DR2.phot_bp_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax)",
"\"\"\"YSOs - Nebula APOGEE(optically invisible, WISE saturated). Shorthand name: mwm_yso_nebula_apogee old class name:",
"(pre-main sequence optical variables). Shorthand name: mwm_yso_variable_apogee old class name: MWM_YSO_S3_Carton old shorthand",
"if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set",
"3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import peewee from sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL, AllWise, Catalog, CatalogToTIC_v8, Gaia_DR2, Sagitta,",
"Note you only need one left outer join between TIC and Gaia (all",
"the HR diagram to select cool pre-main sequence stars, with BP-RP>13, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G,",
"13, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro) > 0.50, (AllWise.w3mpro -",
"and parallax>0.3 \"\"\" name = 'mwm_yso_disk_apogee' category = 'science' instrument = 'APOGEE' cadence",
"all options, even though no single target will receive more than one): boss_bright_3x1",
"((b<-5) and (l > 180)) # l, b in Gaia_DR2 are gallong and",
".join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs -",
"= self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_cluster_boss ;\") output =",
"TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id",
"gaia detection, colors J-H>0,5, W1-W2>0.5, W2-W3>1, W3-W4>1.5, and relates (W3-W4)>(W1-W2)*0.5+1.1 (should have ~11.6K",
"\";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \"",
"to select cool pre-main sequence stars, with BP-RP>13, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G, requiring variability in",
"text. # In words: # all the targets should be within 5 deg",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax",
"contact:<NAME> \"\"\" # peewee Model name ---> postgres table name # Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' #",
"class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified Description of selection criteria: Selecting",
">> True, TwoMassPSC.h_m < 13, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro)",
"query_region=None): query = (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, MIPSGAL.mag_3_6,",
"boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Implementation: (in",
"the catalog of clustered structures, with age<7.5 dex and brighter than H<13 mag.",
"== Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, YSO_Clustering.h",
"colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5, closer than parallax>0.3, and brighter than H<13 (should have",
"| boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: age<7.5 and rp<15.5 Comments: Split",
"and j_m-h_m>1.1) and (b>-5 or l>180) and b<-5 \"\"\" name = 'mwm_yso_nebula_apogee' category",
"sources that can be # located further south of the plane if l>180",
"(query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs",
"all options, even though no single target will receive more than one): apogee_bright_3x1",
"Gaia_DR2.bp_rp * 2.5 + 2.5 > Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax)",
"TwoMassPSC.h_m < 13, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag < 18,",
"target will receive more than one): boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075",
"within 5 deg of the plane+ few sources that can be located further",
"'MWM' priority = 2700 def build_query(self, version_id, query_region=None): # join with Sagitta query1",
">> None) & ((AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.1)), ((Gaia_DR2.b > -5) & (Gaia_DR2.b",
"+ str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss",
"- AllWise.h_m_2mass) > 1.1)), ((Gaia_DR2.b > -5) & (Gaia_DR2.b < 5)) | ((Gaia_DR2.b",
"# Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_boss' category =",
"self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set instrument = '\" + current_instrument",
"Disk BOSS (IR excess). Shorthand name: mwm_yso_disk_boss old class name: MWM_YSO_S1_Carton old shorthand",
"query would be: select ct.catalogid from mipsgal m join twomass_psc t on twomass_name",
"-0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 Comments: Split",
"join with Sagitta query1 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag,",
"update sandbox.temp_mwm_yso_variable_boss \" + \" set instrument = '\" + current_instrument + \"'\"",
"clustered sources from the catalog of vetted pre-main sequence stars Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function",
"of var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95, and log10(var_bp)*5+11<M_BP, in which M_x is the absolute mag (should",
"AllWise, Catalog, CatalogToTIC_v8, Gaia_DR2, Sagitta, TIC_v8, TwoMassPSC, YSO_Clustering, Zari18pms) from target_selection.cartons import BaseCarton",
"Split from mwm_yso_s1 to request BOSS observations, same color selection but assigning cadence",
"names corresponding # to postgres table names: # https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs -",
"= 'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority = 2700 # mipsgal",
"catalogs needed: Gaia, 2mass, allwise Additional cross-matching needed: Note: Using the Gaia xmatch",
"& (Gaia_DR2.b < 5)) | ((Gaia_DR2.b < -5) & (Gaia_DR2.l > 180)) |",
"TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best",
"coordinates back to epoch 2000.0 # (b>-5 or l>180) and b<-5 # S2_5",
"current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set instrument",
"> peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error * 0.75,",
"== TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id,",
"- AllWise.w3mpro) > 1.00, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, (AllWise.w3mpro - AllWise.w4mpro) >",
"RP<15.5 Implementation: (in sagitta | in zari18pms) & rp<15.5 lead contact:<NAME> \"\"\" #",
"2000.0 # (b>-5 or l>180) and b<-5 # S2_5 query below has the",
"+ 11 < Gaia_DR2.phot_bp_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp",
"* (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error,",
"query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),",
"\";\") class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Central Molecular Zone APOGEE. Shorthand name: mwm_yso_cmz_apogee old",
"as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have relations in variability of var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95, and log10(var_bp)*5+11<M_BP, in which",
"join tic_v8 tic on tic.twomass_psc = t.designation left outer join gaia_dr2_source g on",
"selection criteria: Selecting the OB stars at the tip of the main sequence,",
"< peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.95), peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error) * 5 +",
"than one): Pseudo SQL (optional): Implementation: h_m<13 and (phot_g_mean_mag>18.5 or phot_g_mean_mag is null)",
">> True, TwoMassPSC.h_m < 13, (((AllWise.w2mpro - AllWise.w3mpro) > 4) & (AllWise.w4mpro >>",
"= 2700 def build_query(self, version_id, query_region=None): # join with Sagitta query1 = (CatalogToTIC_v8",
"single target will receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and",
"structures, with age<7.5 dex and brighter than H<13 mag. (should have ~45.5K sources)",
"Gaia_DR2.parallax) .join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int",
"Implementation: rp<15.5 and bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) <",
"Description of selection criteria: selection of sources in the central molecular zone based",
"\"\"\"YSOs - Cluster APOGEE Catalog Shorthand name: mwm_yso_cluster_apogee old class name: MWM_YSO_Cluster_Carton old",
"that had 2mass photometry, and then the resulting selection was crossmatched against against",
"'mwm_yso_variable_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso'",
"True, TwoMassPSC.h_m < 13, (Gaia_DR2.phot_g_mean_mag > 18.5) | (Gaia_DR2.phot_g_mean_mag >> None), (AllWise.j_m_2mass -",
"Then join via TIC and catalog_to_tic. # # mipsgal is a subset of",
"catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs - OB APOGEE",
"# In words: # all the targets should be within 5 deg of",
"a between to Catalog and CatalogToTIC_v8 # Catalog.catalogid == CatalogToTIC_v8.catalogid # We can",
"page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20 clustered catalog Additional cross-matching needed: Return",
"allwise id, G, BP, RP, J, H, K, W1, W2, W3, W4,parallax cadence",
"current_cadence = 'bright_3x1' elif(current_rp < 15.075): current_instrument = 'BOSS' current_cadence = 'bright_4x1' elif(current_rp",
"criteria: selection of YSOs based on IR excess, with WISE colors W1-W2>0.25, W2-W3>0.5,",
"id, allwise id, J, H, K, W1, W2, W3, W4 cadence options for",
".join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id == TIC_v8.gaia_int)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id,",
"Additional source catalogs needed: Gaia, 2mass, allwise Additional cross-matching needed: Note: Using the",
"for carton based on RP instead of H \"\"\" name = 'mwm_yso_disk_boss' category",
"name: mwm_yso_cluster_apogee old class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified Description of",
"Selection was done on the allwise catalog that had 2mass photometry, and then",
"as S2 query. def build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'),",
"should be covered above so we should not get here. current_instrument = None",
"have an entry in TIC, but not all the TIC entries have a",
"AllWise.w3mpro) > 1.00, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, (AllWise.w3mpro - AllWise.w4mpro) > (AllWise.w1mpro",
"l>180 # Hence: # ((b>-5) and (b<5)) or ((b<-5) and (l > 180))",
"TIC entries have a Gaia counterpart). Comments: Formerly mwm_yso_cmz, removed check on the",
"of Gaia_DR2.parallax. if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1],",
"MIPSGAL.glon, MIPSGAL.glat) .join(TwoMassPSC, on=(MIPSGAL.twomass_name == TwoMassPSC.designation)) .join(TIC_v8, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id",
"- AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro) > 0.50, (AllWise.w3mpro - AllWise.w4mpro) >",
"(Gaia_DR2.phot_g_mean_mag >> None), (AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.0, (AllWise.h_m_2mass - AllWise.k_m_2mass) > 0.5,",
"mapper = 'MWM' priority = 2700 # yso_clustering is a subset of gaia",
"target will receive more than one): 'apogee_bright_3x1' Pseudo SQL (optional): Implementation: Hmag<13 and",
"= (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_BOSS_Carton(BaseCarton):",
"~3.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: mipsgal Additional cross-matching needed:",
"table name # Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name",
"parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should have ~8.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional",
"1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 Comments: Split from mwm_yso_ob",
".join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >>",
"be joined to twomass_psc via # mipsgal.twomass_name = TwoMassPSC.designation. # Then join via",
"TIC_v8.gaia_int)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, MIPSGAL.hmag",
"Selecting the OB stars at the tip of the main sequence, brighter than",
"gallat in TIC_v8. # We are using the values from Gaia since #",
"not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set cadence = '\"",
"done on the allwise catalog that had 2mass photometry, and then the resulting",
"update sandbox.temp_mwm_yso_ob_boss \" + \" set instrument = '\" + current_instrument + \"'\"",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (AllWise.w1mpro - AllWise.w2mpro) >",
"class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified Description of selection criteria: selection",
"than H<13, have color 8.0-24>2.5, and have parallax<0.2 or lack a Gaia xmatch.",
"if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set",
"+ current_cadence + \"'\" \" where catalogid = \" + str(current_catalogid) + \";\")",
"MIPSGAL.mag_3_6, MIPSGAL.mag_4_5, MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24, MIPSGAL.hmag, Gaia_DR2.parallax, MIPSGAL.glon, MIPSGAL.glat) .join(TwoMassPSC, on=(MIPSGAL.twomass_name == TwoMassPSC.designation))",
"than H<15, saturated (blank) W4 with W2-W3>4, or saturated W3 and W2, with",
"and 1 (m.glon > 358 or m.glon < 2) and (m.glat > -1",
"(AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro) > 0.50, (AllWise.w3mpro - AllWise.w4mpro)",
"old shorthand name: mwm_yso_cluster Simplified Description of selection criteria: Selecting the clustered sources",
"Gaia with 1\" search radius. Return columns: Gaia id, 2mass id, allwise id,",
"version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13,",
"(MIPSGAL.glon < 2), (MIPSGAL.glat > -1) & (MIPSGAL.glat < 1), \"\"\" name =",
"log10(var_bp)*5+11<M_BP, in which M_x is the absolute mag (should have ~52.7K sources) Wiki",
"on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True,",
"SQL (optional): Implementation: h_m<13 and (phot_g_mean_mag>18.5 or phot_g_mean_mag is null) and j_m-h_m>1 and",
"receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and (phot_g_mean_mag>18.5 or phot_g_mean_mag",
"(should have ~52.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, gaia",
"above query, we cannot use TIC_v8.plx instead # of Gaia_DR2.parallax. if query_region: query",
"13, (((AllWise.w2mpro - AllWise.w3mpro) > 4) & (AllWise.w4mpro >> None)) | ((AllWise.w3mpro >>",
"0.02)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2])))",
"in TIC, but not all the TIC entries have a Gaia counterpart). Comments:",
"\" update sandbox.temp_mwm_yso_pms_boss \" + \" set cadence = '\" + current_cadence +",
"allwise catalog that had 2mass photometry, and then the resulting selection was crossmatched",
"= 'bright_6x1' else: # All cases should be covered above so we should",
"= (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m,",
"K, W1, W2, W3, W4,parallax cadence options for these targets (list all options,",
"current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set cadence",
"m.glon < 2) and (m.glat > -1 and m.glat < 1) and Sources",
"single target will receive more than one): apogee_bright_3x1 (for 7 < H <",
"on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (((AllWise.w2mpro",
"lack gaia xmatch) Additional cross-matching needed: Note: Using the Gaia xmatch somehow misses",
"peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error,",
"mapper = 'MWM' priority = 2700 def build_query(self, version_id, query_region=None): # join with",
"name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified Description of selection criteria: Selecting the",
"RP instead of H \"\"\" name = 'mwm_yso_cluster_boss' category = 'science' instrument =",
"on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source))",
"(optional): Implementation: h_m<13 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 \"\"\" name",
"name: mwm_yso_disk_boss old class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified Description of",
"0.95), peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error) * 5 + 11 < Gaia_DR2.phot_bp_mean_mag - 5",
"and bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75)",
"< 1.1), Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) -",
"join with Catalog doesn't give us anything extra and it's a costly join.",
"class MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, APOGEE Shorthand name: mwm_yso_pms_apogee Comments: New",
"catalogdb.sagitta, catalogdb.zari18pms Return columns: Gaia id, 2mass id, G, BP, RP, J, H,",
"the raw sql query would be: select ct.catalogid from mipsgal m join twomass_psc",
"current_instrument = 'BOSS' current_cadence = 'bright_6x1' else: # All cases should be covered",
"\"\"\" name = 'mwm_yso_ob_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1'",
"tic.id where m.hmag < 13 and (m.mag_8_0 - m.mag_24) > 2.5 and (g.parallax",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (AllWise.w1mpro - AllWise.w2mpro) >",
"TIC_v8 # Gaia_DR2 is a subset of TIC_v8 # # 2MASS is not",
"\" update sandbox.temp_mwm_yso_disk_boss \" + \" set instrument = '\" + current_instrument +",
"mipsgal Additional cross-matching needed: the table has xmatch included Return columns: mipsgal id,",
"New, Split from PMS Simplified Description of selection criteria: Selecting the clustered sources",
"and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 Comments: Split from mwm_yso_s1 to request BOSS",
"RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_disk_boss",
"sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Gaia, 2mass, allwise Additional cross-matching",
"instrument is set in post_process() cadence = None # cadence is set in",
"below are equivalent. (l> 358 or l< 2) and b between -1 and",
"Pre-main sequence, APOGEE Shorthand name: mwm_yso_pms_apogee Comments: New Simplified Description of selection criteria:",
"= 'mwm_yso_disk_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program =",
"== TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta,",
"from the catalog of vetted pre-main sequence stars Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source",
">> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro)",
"= None current_cadence = None raise TargetSelectionError('error in mwm_yso_disk_boss ' + 'post_process(): '",
"None raise TargetSelectionError('error in mwm_yso_disk_boss ' + 'post_process(): ' + 'instrument = None,",
"stars at the tip of the main sequence, brighter than rp<15.5, G<18 mag,",
"== version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp",
"selection of YSOs brighter than H<13, closer than parallax>0.3. Filter on the position",
"of Gaia_DR2.parallax. # Hence, in the above query, we cannot use TIC_v8.plx instead",
"== version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.phot_g_mean_mag > 18.5) | (Gaia_DR2.phot_g_mean_mag",
"allwise Additional cross-matching needed: Return columns: 2mass id, allwise id, J, H, K,",
"have relations in variability of var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95, and log10(var_bp)*5+11<M_BP, in which M_x is",
"Pseudo SQL (optional): Implementation: rp<15.5 and bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18",
"w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 Comments: Split from mwm_yso_s1 to request",
"\"\"\" name = 'mwm_yso_disk_boss' category = 'science' instrument = None # instrument is",
"# \"yso_clustering_source_id_fkey\" FOREIGN KEY (source_id) # REFERENCES gaia_dr2_source(source_id) def build_query(self, version_id, query_region=None): query",
"'MWM' priority = 2700 def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,",
"sandbox.temp_mwm_yso_ob_boss \" + \" set cadence = '\" + current_cadence + \"'\" \"",
"def build_query(self, version_id, query_region=None): # join with Sagitta query1 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,",
"catalog of clustered structures, with age<7.5 dex and brighter than H<13 mag. (should",
"query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Cluster APOGEE Catalog Shorthand",
"cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" +",
"instead # of Gaia_DR2.parallax. if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec,",
"= \" + str(current_catalogid) + \";\") class MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs - OB APOGEE Upper",
"Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and phot_g_mean_mag < 18.5 and h_m <13 and",
"more than one): Pseudo SQL (optional): Implementation: age<7.5 and h<13 \"\"\" name =",
"< 13, (((AllWise.w2mpro - AllWise.w3mpro) > 4) & (AllWise.w4mpro >> None)) | ((AllWise.w3mpro",
"(b>-5 or l>180) and b<-5 \"\"\" name = 'mwm_yso_nebula_apogee' category = 'science' instrument",
"== TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, MIPSGAL.hmag < 13, (MIPSGAL.mag_8_0 -",
"entry in TIC, but not all the TIC entries have a Gaia counterpart).",
"no single target will receive more than one): apogee_bright_3x1 (for 7 < H",
"not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set cadence = '\"",
"= None current_cadence = None raise TargetSelectionError('error in mwm_yso_ob_boss ' + 'post_process(): '",
"Comments: Formerly mwm_yso_cmz, removed check on the position on the sky: Removed below",
"1.1)), ((Gaia_DR2.b > -5) & (Gaia_DR2.b < 5)) | ((Gaia_DR2.b < -5) &",
"null) For CMZ, the raw sql query would be: select ct.catalogid from mipsgal",
"# Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_apogee' category = 'science' instrument = 'APOGEE'",
"on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8)",
"+ 2.5 > Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp",
"mwm_yso_ob_boss ' + 'post_process(): ' + 'instrument = None, cadence= None') if current_instrument",
"one): 'apogee_bright_3x1' Pseudo SQL (optional): Implementation: Hmag<13 and _8_0_-_24_>2.5 and (parallax<0.2 or parallax",
"> 1.0, (AllWise.h_m_2mass - AllWise.k_m_2mass) > 0.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.50, (AllWise.w2mpro",
"the central molecular zone based on spitzer fluxes from mipsgal. brighter than H<13,",
"the tip of the main sequence, brighter than rp<15.5, G<18 mag, closer than",
"3.6, 4.8, 8.0, 24 mag cadence options for these targets (list all options,",
"and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 \"\"\" name =",
"query, we cannot use TIC_v8.plx instead # of Gaia_DR2.parallax. if query_region: query =",
"(pre-main sequence optical variables). Shorthand name: mwm_yso_variable_boss old class name: MWM_YSO_S3_Carton old shorthand",
"cadence = '\" + current_cadence + \"'\" \" where catalogid = \" +",
"\"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_disk_boss ;\") output = cursor.fetchall() for",
"build_query(self, version_id, query_region=None): query = (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m, TwoMassPSC.h_m,",
"\"\"\"YSOs - Variable APOGEE (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_apogee old class",
"mipsgal. brighter than H<13, have color 8.0-24>2.5, and have parallax<0.2 or lack a",
"class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Central Molecular Zone APOGEE. Shorthand name: mwm_yso_cmz_apogee old class",
"TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))",
"YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, YSO_Clustering.h < 13, YSO_Clustering.age < 7.5))",
"\"\"\"YSOs - Embedded APOGEE (optically invisible). Shorthand name: mwm_yso_embedded_apogee old class name: MWM_YSO_S2_Carton",
"cool pre-main sequence stars, with BP-RP>13, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G, requiring variability in g,bp,rp>0.02 (with",
"# Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_apogee' category = 'science' instrument",
"below, we do not need a between to Catalog and CatalogToTIC_v8 # Catalog.catalogid",
"TwoMassPSC.h_m < 13, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro) > 0.50,",
"Gaia_DR2.phot_rp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error * 0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs)",
"Using the Gaia xmatch somehow misses half the sources. Selection was done on",
"'mwm_yso_cluster_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso'",
"None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set instrument = '\" +",
"# table catalogdb.yso_clustering # Foreign-key constraints: # \"yso_clustering_source_id_fkey\" FOREIGN KEY (source_id) # REFERENCES",
"/ Gaia_DR2.phot_bp_mean_flux_over_error) * 5 + 11 < Gaia_DR2.phot_bp_mean_mag - 5 * (peewee.fn.log(1000 /",
"== version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # join with Zari18pms query2",
"catalogs needed: 2mass, gaia Additional cross-matching needed: Return columns: Gaia id, 2mass id,",
"sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02",
">> True, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp *",
"== TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True,",
"1.1)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2])))",
"TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise",
"mipsgal.twomass_name = TwoMassPSC.designation. # Then join via TIC and catalog_to_tic. # # mipsgal",
"catalogid = \" + str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql(",
"with age<7.5 dex and brighter than H<13 mag. (should have ~45.5K sources) Wiki",
"APOGEE (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_apogee old class name: MWM_YSO_S3_Carton old",
"2020-06-10 # @Filename: mwm_yso.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import peewee from sdssdb.peewee.sdss5db.catalogdb",
"# REFERENCES twomass_psc(designation) # # Due to below, we do not need a",
"on RP instead of H \"\"\" name = 'mwm_yso_cluster_boss' category = 'science' instrument",
"rp<15.5 lead contact:<NAME> \"\"\" # peewee Model name ---> postgres table name #",
"Pseudo SQL (optional): Implementation: h_m<13 and bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18",
"current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set cadence",
"= 'MWM' priority = 2700 # yso_clustering is a subset of gaia and",
"TIC_v8. # We are using the values from Gaia since # TIC propagates",
"- 1) < 1.6 * Gaia_DR2.bp_rp - 2.2, Gaia_DR2.parallax > 0.3)) if query_region:",
"xmatch) Additional cross-matching needed: Note: Using the Gaia xmatch somehow misses half the",
"peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.95), peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error) * 5 + 11 <",
"table catalogdb.mipsgal # Foreign-key constraints: # \"twomass_name_fk\" FOREIGN KEY (twomass_name) # REFERENCES twomass_psc(designation)",
"half the sources. Selection was done on the allwise catalog that had 2mass",
"\"'\" \" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs",
"class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified Description of selection criteria: Selecting",
"and W2, with J-H>1.1. Some contaminants from scanning are filtered on the plane",
"- 1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error >",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (((AllWise.w2mpro - AllWise.w3mpro) >",
"Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8,",
"Implementation: phot_g_mean_mag < 18.5 and h_m <13 and parallax >0.3 and bp_rp*2.5+2.5 >",
"# TIC propagates the coordinates back to epoch 2000.0 # (b>-5 or l>180)",
"SQL (optional): Implementation: phot_g_mean_mag < 18.5 and h_m <13 and parallax >0.3 and",
"(AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.0, (AllWise.h_m_2mass - AllWise.k_m_2mass) > 0.5, (AllWise.w1mpro - AllWise.w2mpro)",
"self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set cadence = '\" + current_cadence",
"receive more than one): apogee_bright_3x1 (for 7 < H < 13) Implementation: (in",
"and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments: Split from mwm_yso_s3 to request BOSS observations, RP magnitude check",
"against Gaia with 1\" search radius. Return columns: Gaia id, 2mass id, allwise",
"for i in range(len(output)): current_catalogid = output[i][0] current_rp = output[i][1] if(current_rp < 14.76):",
">> True, Gaia_DR2.phot_rp_mean_mag < 15.5, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax >",
"and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\" name = 'mwm_yso_variable_apogee' category",
"of gaia and # can be joined to gaia_dr2_source via source_id. # #",
"l and 1 degree in b from the galactic center, (MIPSGAL.glon > 358)",
"of clustered structures, with age<7.5 dex and brighter than H<13 mag. (should have",
"with BP-RP>13, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G, requiring variability in g,bp,rp>0.02 (with var_x defined as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error),",
"(should have ~1.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, allwise",
"request BOSS observations, assigning cadence and faint limit for carton based on RP",
"MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified Description of selection criteria: selection of YSOs",
"one): Pseudo SQL (optional): Implementation: h_m<13 and (w2mpro-w3mpro>4 and w4mpro is null) or",
"than G>15 or without gaia detection, colors J-H>0,5, W1-W2>0.5, W2-W3>1, W3-W4>1.5, and relates",
"in mwm_yso_pms_boss ' + 'post_process(): ' + 'instrument = None, cadence= None') if",
"= self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_ob_boss ;\") output =",
"Split from PMS Simplified Description of selection criteria: Selecting the clustered sources from",
"via TIC and catalog_to_tic. # # mipsgal is a subset of 2MASS #",
"- 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp * 2.5 - 1",
"selection criteria: selection of YSOs based on IR excess, with WISE colors W1-W2>0.25,",
"w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\" name = 'mwm_yso_embedded_apogee' category = 'science' instrument = 'APOGEE' cadence =",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (Gaia_DR2.bp_rp > -0.2) &",
"anything extra and it's a costly join. def build_query(self, version_id, query_region=None): query =",
"Gaia_DR2.phot_bp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs)",
"(twomass_name) # REFERENCES twomass_psc(designation) # # Due to below, we do not need",
"on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, MIPSGAL.hmag < 13, (MIPSGAL.mag_8_0",
"== version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (AllWise.w1mpro - AllWise.w2mpro) > 0.25,",
"H<13 (should have ~21.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Gaia,",
"on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id ==",
"below clause # and (b>-5 or l>180) and b<-5 # Replace (b>-5 or",
"mag cadence options for these targets (list all options, even though no single",
"> 1.50, Gaia_DR2.parallax > 0.3)) # Gaia_DR2 pweewee model class corresponds to #",
"0.3)) # Gaia_DR2 pweewee model class corresponds to # table catalogdb.gaia_dr2_source. # #",
"catalogs needed: 2mass, allwise Additional cross-matching needed: Return columns: 2mass id, allwise id,",
"name: mwm_yso_pms_boss Comments: New, Split from PMS Simplified Description of selection criteria: Selecting",
"TIC_v8.plx (for non-null entries) are not the same as # values of Gaia_DR2.parallax.",
"2mass id, allwise id, J, H, K, W1, W2, W3, W4 cadence options",
"MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified Description of selection criteria: Selecting the clustered",
"= 'mwm_yso' mapper = 'MWM' priority = 2700 def build_query(self, version_id, query_region=None): query",
"radius. Return columns: Gaia id, 2mass id, allwise id, G, BP, RP, J,",
"set in post_process() program = 'mwm_yso' mapper = 'MWM' priority = 2700 #",
"current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set cadence",
"from \" + \" sandbox.temp_mwm_yso_variable_boss ;\") output = cursor.fetchall() for i in range(len(output)):",
"or without gaia detection, colors J-H>0,5, W1-W2>0.5, W2-W3>1, W3-W4>1.5, and relates (W3-W4)>(W1-W2)*0.5+1.1 (should",
".join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13,",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # join with Zari18pms",
"and (m.mag_8_0 - m.mag_24) > 2.5 and (g.parallax < 0.2 or g.parallax is",
"of YSOs based on IR excess, with WISE colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5, closer",
"and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 \"\"\" name = 'mwm_yso_disk_apogee' category",
"== Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (Gaia_DR2.bp_rp >",
"if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set",
"Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Gaia, 2mass, allwise Additional cross-matching needed:",
"here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_ob_boss ' +",
"one): cadence options for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075",
"CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.phot_g_mean_mag > 18.5) | (Gaia_DR2.phot_g_mean_mag >> None),",
"(<EMAIL>) # @Date: 2020-06-10 # @Filename: mwm_yso.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import",
"Gaia_DR2.phot_rp_mean_mag < 15.5, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag < 18,",
"(optional): Implementation: phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 Comments: Split",
"cadence options for these targets (list all options, even though no single target",
"2MASS have an entry in TIC, but not all the TIC entries have",
"version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),",
"Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j, YSO_Clustering.h, YSO_Clustering.k, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id",
"& (Gaia_DR2.l > 180)) | ((Gaia_DR2.b >> None) & (Gaia_DR2.l >> None)))) if",
"Pseudo SQL (optional): Implementation: Hmag<13 and _8_0_-_24_>2.5 and (parallax<0.2 or parallax is null)",
"= 'BOSS' current_cadence = 'bright_3x1' elif(current_rp < 15.075): current_instrument = 'BOSS' current_cadence =",
"(optional): Implementation: age<7.5 and h<13 \"\"\" name = 'mwm_yso_cluster_apogee' category = 'science' instrument",
"even though no single target will receive more than one): apogee_bright_3x1 (for 7",
"< 5)) | ((Gaia_DR2.b < -5) & (Gaia_DR2.l > 180)) | ((Gaia_DR2.b >>",
"BP, RP, J, H, K, W1, W2, W3, W4 cadence options for these",
"Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc",
"and m.glat < 1) and Sources are within 2 degrees in l and",
"join with Catalog in all the cartons # since catalogid is completely unique",
"(Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag - 5",
"Variable BOSS (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_boss old class name: MWM_YSO_S3_Carton",
"Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int",
"query_region[2]))) return query class MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs - Cluster BOSS Catalog Shorthand name: mwm_yso_cluster_boss",
"the join with Catalog in all the cartons # since catalogid is completely",
"sandbox.temp_mwm_yso_cluster_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0] current_rp",
"update sandbox.temp_mwm_yso_cluster_boss \" + \" set instrument = '\" + current_instrument + \"'\"",
"target will receive more than one): apogee_bright_3x1 (for 7 < H < 13)",
"below has the same part before where() as S2 query. def build_query(self, version_id,",
"None raise TargetSelectionError('error in mwm_yso_ob_boss ' + 'post_process(): ' + 'instrument = None,",
"cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" +",
"# # table catalogdb.yso_clustering # Foreign-key constraints: # \"yso_clustering_source_id_fkey\" FOREIGN KEY (source_id) #",
"'bright_6x1' else: # All cases should be covered above so we should not",
"target_selection.cartons import BaseCarton from target_selection.exceptions import TargetSelectionError # See catalog.py for the name",
"> 1.00, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, (AllWise.w3mpro - AllWise.w4mpro) > (AllWise.w1mpro -",
"sources in the central molecular zone based on spitzer fluxes from mipsgal. brighter",
"needed: catalogdb.sagitta, catalogdb.zari18pms Return columns: Gaia id, 2mass id, G, BP, RP, J,",
"+ str(current_catalogid) + \";\") class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Central Molecular Zone APOGEE. Shorthand",
"or g.parallax is null) and ct.version_id = 13 and ct.best is true; Note",
"cadence = 'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority = 2700 def",
"post_process() program = 'mwm_yso' mapper = 'MWM' priority = 2700 # yso_clustering is",
"Disk APOGEE (IR excess). Shorthand name: mwm_yso_disk_apogee old class name: MWM_YSO_S1_Carton old shorthand",
"\"\"\" cadence options for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075",
"> 1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs)",
"mwm_yso_cluster_apogee old class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified Description of selection",
"Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp > 1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /",
"brighter than H<15, saturated (blank) W4 with W2-W3>4, or saturated W3 and W2,",
"Additional cross-matching needed: Return columns: 2mass id, allwise id, J, H, K, W1,",
"is true; Note you only need one left outer join between TIC and",
"2mass photometry, and then the resulting selection was crossmatched against against Gaia with",
"Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),",
"(query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs",
"H \"\"\" name = 'mwm_yso_disk_boss' category = 'science' instrument = None # instrument",
"+ \"'\" \" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_OB_APOGEE_Carton(BaseCarton):",
"even though no single target will receive more than one): Pseudo SQL (optional):",
"\"\"\" name = 'mwm_yso_embedded_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1'",
"-5) & (Gaia_DR2.b < 5)) | ((Gaia_DR2.b < -5) & (Gaia_DR2.l > 180))",
"from PMS Simplified Description of selection criteria: Selecting the clustered sources from the",
"Cluster APOGEE Catalog Shorthand name: mwm_yso_cluster_apogee old class name: MWM_YSO_Cluster_Carton old shorthand name:",
"1.0, (AllWise.h_m_2mass - AllWise.k_m_2mass) > 0.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.50, (AllWise.w2mpro -",
"cartons # since catalogid is completely unique (even across different version_id) # so",
"the targets should be within 5 deg of the plane+ # few sources",
"id, G, BP, RP, J, H, K, parallax cadence options for these targets:",
"in TIC_v8. # We are using the values from Gaia since # TIC",
"(MIPSGAL.glat > -1) & (MIPSGAL.glat < 1), \"\"\" name = 'mwm_yso_cmz_apogee' category =",
"query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Cluster APOGEE Catalog Shorthand name:",
"of the plane+ # few sources that can be # located further south",
"- Pre-main sequence, BOSS Shorthand name: mwm_yso_pms_boss Comments: New, Split from PMS Simplified",
"if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return",
"self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_pms_boss ;\") output = cursor.fetchall()",
"and parallax>0.3 Comments: Split from mwm_yso_ob to request BOSS observations, assigning cadence and",
"targets have a counterpart in 2MASS, and all 2MASS have an entry in",
"= tic.gaia_int join catalog_to_tic_v8 ct on ct.target_id = tic.id where m.hmag < 13",
"* 2.5 - 1 < Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) -",
"elif(current_rp < 15.5): current_instrument = 'BOSS' current_cadence = 'bright_6x1' else: # All cases",
"constraints: # \"twomass_name_fk\" FOREIGN KEY (twomass_name) # REFERENCES twomass_psc(designation) # # Due to",
"old class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified Description of selection criteria:",
"H, K, parallax cadence options for these targets (list all options, even though",
"query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m,",
"has below clause # and (b>-5 or l>180) and b<-5 # Replace (b>-5",
"extra and it's a costly join. def build_query(self, version_id, query_region=None): query = (MIPSGAL.select(CatalogToTIC_v8.catalogid,",
"# and (b>-5 or l>180) and b<-5 # Replace (b>-5 or l>180) and",
"parallax<0.2 or lack a Gaia xmatch. (should have ~3.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function",
"Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j, YSO_Clustering.h, YSO_Clustering.k, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int ==",
"| boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select",
"Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp * 2.5 -",
"the OB stars at the tip of the main sequence, brighter than rp<15.5,",
"is completely unique (even across different version_id) # so the join with Catalog",
"- Variable APOGEE (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_apogee old class name:",
"str(current_catalogid) + \";\") class MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs - OB APOGEE Upper (pre-)Main Sequence. Shorthand",
"# since catalogid is completely unique (even across different version_id) # so the",
"'science' instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso' mapper = 'MWM'",
"priority = 2700 def build_query(self, version_id, query_region=None): # join with Sagitta query1 =",
"\"\"\" name = 'mwm_yso_nebula_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1'",
"AllWise.w4mpro) > (AllWise.w1mpro - AllWise.w2mpro) * 0.8 + 1.1)) if query_region: query =",
"= 'bright_3x1' elif(current_rp < 15.075): current_instrument = 'BOSS' current_cadence = 'bright_4x1' elif(current_rp <",
"detection, colors J-H>0,5, W1-W2>0.5, W2-W3>1, W3-W4>1.5, and relates (W3-W4)>(W1-W2)*0.5+1.1 (should have ~11.6K sources)",
".join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best",
".switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m <",
"though no single target will receive more than one): Pseudo SQL (optional): Implementation:",
"is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set instrument =",
"selection of YSOs, brighter than H<15, saturated (blank) W4 with W2-W3>4, or saturated",
"boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 \"\"\" cursor",
"= 'mwm_yso_cluster_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program =",
"source_id. # # table catalogdb.yso_clustering # Foreign-key constraints: # \"yso_clustering_source_id_fkey\" FOREIGN KEY (source_id)",
"needed: 2mass, allwise Additional cross-matching needed: Return columns: 2mass id, allwise id, J,",
"Gaia_DR2.phot_rp_mean_mag < 15.5)) # | is for peewee SQL union query = query1",
"sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments: Split from mwm_yso_s3 to request BOSS observations, RP magnitude check added",
"MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified Description of selection criteria: Selecting the OB",
"twomass_name = designation join tic_v8 tic on tic.twomass_psc = t.designation left outer join",
"YSOs brighter than H<13, closer than parallax>0.3. Filter on the position of the",
"Gaia_DR2.parallax > 0.3)) # Gaia_DR2 pweewee model class corresponds to # table catalogdb.gaia_dr2_source.",
"None) & (Gaia_DR2.l >> None)))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra,",
"Gaia_DR2.phot_rp_mean_mag < 15.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro) > 0.50,",
"AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation)) .join(TwoMassPSC,",
"YSO_Clustering.h < 13, YSO_Clustering.age < 7.5)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog)",
"var_x defined as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have relations in variability of var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95, and log10(var_bp)*5+11<M_BP,",
"not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set cadence = '\"",
"TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2,",
"for carton based on RP instead of H \"\"\" name = 'mwm_yso_cluster_boss' category",
"if RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \"",
"return query class MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs - Variable BOSS (pre-main sequence optical variables). Shorthand",
"current_cadence = None raise TargetSelectionError('error in mwm_yso_pms_boss ' + 'post_process(): ' + 'instrument",
"name = 'mwm_yso_ob_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program",
"carton based on RP instead of H \"\"\" name = 'mwm_yso_ob_boss' category =",
"/ Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp * 2.5 - 1 < Gaia_DR2.phot_g_mean_mag - 5",
"== TIC_v8.id)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id ==",
"/ Gaia_DR2.phot_bp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error < peewee.fn.power(",
"table has xmatch included Return columns: mipsgal id, 2mass id, j, h, k,",
"# mipsgal can be joined to twomass_psc via # mipsgal.twomass_name = TwoMassPSC.designation. #",
"(source_id) # REFERENCES gaia_dr2_source(source_id) def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,",
"H<13, closer than parallax>0.3. Filter on the position of the HR diagram to",
">> True, MIPSGAL.hmag < 13, (MIPSGAL.mag_8_0 - MIPSGAL.mag_24) > 2.5, (Gaia_DR2.parallax < 0.2)",
"mwm_yso_s1 Simplified Description of selection criteria: selection of YSOs based on IR excess,",
"Comments: Split from mwm_yso_ob to request BOSS observations, assigning cadence and faint limit",
"is null) and ct.version_id = 13 and ct.best is true; Note you only",
"= cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0] current_rp = output[i][1] if(current_rp",
"from sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL, AllWise, Catalog, CatalogToTIC_v8, Gaia_DR2, Sagitta, TIC_v8, TwoMassPSC, YSO_Clustering, Zari18pms)",
"query class MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, BOSS Shorthand name: mwm_yso_pms_boss Comments:",
"if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25",
"sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments: Split from mwm_yso_s3 to request BOSS observations, RP magnitude",
"OB stars at the tip of the main sequence, brighter than rp<15.5, G<18",
"(optional): Implementation: h_m<13 and (phot_g_mean_mag>18.5 or phot_g_mean_mag is null) and j_m-h_m>1 and h_m-ks_m>0.5",
"current_cadence = None raise TargetSelectionError('error in mwm_yso_cluster_boss ' + 'post_process(): ' + 'instrument",
"Gaia_DR2.phot_bp_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp > 1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs)",
"MWM_YSO_S2_5_Carton old shorthand name: mwm_yso_s2_5 Simplified Description of selection criteria: selection of YSOs,",
"CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1),",
"program = 'mwm_yso' mapper = 'MWM' priority = 2700 # yso_clustering is a",
"current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_pms_boss ' + 'post_process():",
"< 7.5)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1],",
"== TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.phot_g_mean_mag >",
"> 2.5 and (g.parallax < 0.2 or g.parallax is null) and ct.version_id =",
"old class name: MWM_YSO_S2_5_Carton old shorthand name: mwm_yso_s2_5 Simplified Description of selection criteria:",
"MIPSGAL.mag_8_0, MIPSGAL.mag_24, MIPSGAL.hmag, Gaia_DR2.parallax, MIPSGAL.glon, MIPSGAL.glat) .join(TwoMassPSC, on=(MIPSGAL.twomass_name == TwoMassPSC.designation)) .join(TIC_v8, on=(TIC_v8.twomass_psc ==",
"1) and Sources are within 2 degrees in l and 1 degree in",
"2700 def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key,",
"AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (AllWise.w1mpro - AllWise.w2mpro)",
"< 15.5, YSO_Clustering.age < 7.5)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra,",
"1 degree in b from the galactic center, (MIPSGAL.glon > 358) | (MIPSGAL.glon",
"# Above implementation has below clause # and (b>-5 or l>180) and b<-5",
"columns: mipsgal id, 2mass id, j, h, k, 3.6, 4.8, 8.0, 24 mag",
"= (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j, YSO_Clustering.h, YSO_Clustering.k,",
"boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: rp<15.5 and bp_rp between -0.2 and",
".select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax)",
"the same as # values of Gaia_DR2.parallax. # Hence, in the above query,",
"and catalog_to_tic. # # mipsgal is a subset of 2MASS # 2MASS is",
"and Sources are within 2 degrees in l and 1 degree in b",
"\" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\" YSOs",
"where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs - OB",
"and w3mpro-w4mpro>1.5 and parallax>0.3 Comments: Split from mwm_yso_s1 to request BOSS observations, same",
">> None) & (AllWise.w4mpro >> None) & ((AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.1)), ((Gaia_DR2.b",
"galactic center, (MIPSGAL.glon > 358) | (MIPSGAL.glon < 2), (MIPSGAL.glat > -1) &",
"& (AllWise.w4mpro >> None) & ((AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.1)), ((Gaia_DR2.b > -5)",
"2.5 and (g.parallax < 0.2 or g.parallax is null) and ct.version_id = 13",
"BOSS Catalog Shorthand name: mwm_yso_cluster_boss old class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster",
"= (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_OB_BOSS_Carton(BaseCarton):",
"\"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_cluster_boss ;\") output = cursor.fetchall() for",
"was done on the allwise catalog that had 2mass photometry, and then the",
"# TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_apogee' category = 'science' instrument = 'APOGEE' cadence =",
"((AllWise.w3mpro >> None) & (AllWise.w4mpro >> None) & ((AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.1)),",
"deg of the plane+ # few sources that can be # located further",
"version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3,",
"on twomass_name = designation join tic_v8 tic on tic.twomass_psc = t.designation left outer",
"return query class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Cluster APOGEE Catalog Shorthand name: mwm_yso_cluster_apogee old",
"== version_id, CatalogToTIC_v8.best >> True, MIPSGAL.hmag < 13, (MIPSGAL.mag_8_0 - MIPSGAL.mag_24) > 2.5,",
"= (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query def post_process(self,",
"Additional cross-matching needed: Return columns: Gaia id, 2mass id, G, BP, RP, J,",
"of YSOs, brighter than H<13, fainter than G>15 or without gaia detection, colors",
"APOGEE Shorthand name: mwm_yso_pms_apogee Comments: New Simplified Description of selection criteria: Selecting the",
"if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set",
"1), Gaia_DR2.bp_rp > 1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error >",
"than rp<15.5 mag. Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20 clustered catalog",
"MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24, MIPSGAL.hmag, Gaia_DR2.parallax, MIPSGAL.glon, MIPSGAL.glat) .join(TwoMassPSC, on=(MIPSGAL.twomass_name == TwoMassPSC.designation)) .join(TIC_v8, on=(TIC_v8.twomass_psc",
"vetted pre-main sequence stars Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: catalogdb.sagitta, catalogdb.zari18pms",
"on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m",
"| boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Implementation: (in sagitta | in",
"Gaia_DR2.phot_g_mean_flux_over_error * 0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs)",
"t on twomass_name = designation join tic_v8 tic on tic.twomass_psc = t.designation left",
"on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) #",
"or (w3mpro is null and w4mpro is null and j_m-h_m>1.1) and (b>-5 or",
"update sandbox.temp_mwm_yso_disk_boss \" + \" set cadence = '\" + current_cadence + \"'\"",
"Description of selection criteria: selection of YSOs brighter than H<13, closer than parallax>0.3.",
"== Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, Gaia_DR2.phot_g_mean_mag <",
"cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_cluster_boss ;\") output",
"Description of selection criteria: selection of YSOs based on IR excess, with WISE",
"sandbox.temp_mwm_yso_ob_boss \" + \" set instrument = '\" + current_instrument + \"'\" \"",
">> True, TwoMassPSC.h_m < 13)) # | is for peewee SQL union query",
"query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, BOSS",
"/ Gaia_DR2.parallax) - 1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) /",
"peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error) * 5 + 11 < Gaia_DR2.phot_bp_mean_mag - 5 *",
"is for peewee SQL union query = query1 | query2 if query_region: query",
"pre-main sequence stars, with BP-RP>13, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G, requiring variability in g,bp,rp>0.02 (with var_x",
"so the join with Catalog doesn't give us anything extra and it's a",
"TIC_v8.plx instead # of Gaia_DR2.parallax. if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra,",
"molecular zone based on spitzer fluxes from mipsgal. brighter than H<13, have color",
"Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query def post_process(self, model): \"\"\" cadence options for",
"2MASS # mipsgal can be joined to twomass_psc via # mipsgal.twomass_name = TwoMassPSC.designation.",
"- AllWise.w4mpro) > 1.50, Gaia_DR2.parallax > 0.3)) # Gaia_DR2 pweewee model class corresponds",
"G, BP, RP, J, H, K, W1, W2, W3, W4,parallax cadence options for",
"Gaia_DR2.phot_g_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > 0.02))",
"options, even though no single target will receive more than one): cadence options",
"power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02",
"mapper = 'MWM' priority = 2700 def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8",
"K, W1, W2, W3, W4 cadence options for these targets (list all options,",
"Foreign-key constraints: # \"twomass_name_fk\" FOREIGN KEY (twomass_name) # REFERENCES twomass_psc(designation) # # Due",
"None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set cadence = '\" +",
"5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp * 2.5 - 1 <",
"should be within 5 deg of the plane+ # few sources that can",
".join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13,",
"Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_apogee' category = 'science' instrument = 'APOGEE' cadence",
"\"\"\" YSOs - Pre-main sequence, APOGEE Shorthand name: mwm_yso_pms_apogee Comments: New Simplified Description",
"are gallong and gallat in TIC_v8. # We are using the values from",
"mwm_yso_s2 Simplified Description of selection criteria: selection of YSOs, brighter than H<13, fainter",
"sequence stars, with BP-RP>13, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G, requiring variability in g,bp,rp>0.02 (with var_x defined",
"RP<15.29 | boss_bright_6x1 if RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from",
"= self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_disk_boss ;\") output =",
"of selection criteria: Selecting the OB stars at the tip of the main",
"id, j, h, k, 3.6, 4.8, 8.0, 24 mag cadence options for these",
"of YSOs brighter than H<13, closer than parallax>0.3. Filter on the position of",
"query_region[2]))) return query class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Cluster APOGEE Catalog Shorthand name: mwm_yso_cluster_apogee",
"Additional source catalogs needed: catalogdb.sagitta, catalogdb.zari18pms Return columns: Gaia id, 2mass id, G,",
"TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8)",
"is set in post_process() cadence = None # cadence is set in post_process()",
"mipsgal is a subset of 2MASS # 2MASS is a subset of TIC_v8",
"Shorthand name: mwm_yso_pms_boss Comments: New, Split from PMS Simplified Description of selection criteria:",
"brighter than rp<15.5 mag. Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20 clustered",
"model class corresponds to # table catalogdb.gaia_dr2_source. # # All values of TIC_v8.plx",
".join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13,",
"15.29): current_instrument = 'BOSS' current_cadence = 'bright_5x1' elif(current_rp < 15.5): current_instrument = 'BOSS'",
"2.5 + 2.5 > Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1),",
"more than one): Pseudo SQL (optional): Implementation: h_m<13 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and",
"instead of H \"\"\" name = 'mwm_yso_ob_boss' category = 'science' instrument = None",
"give us anything extra and it's a costly join. def build_query(self, version_id, query_region=None):",
"category = 'science' instrument = None # instrument is set in post_process() cadence",
"observations, same color selection but assigning cadence and faint limit for carton based",
"APOGEE. Shorthand name: mwm_yso_cmz_apogee old class name: MWM_YSO_CMZ_Carton old shorthand name: mwm_yso_cmz Simplified",
"1), Gaia_DR2.bp_rp * 2.5 - 1 < Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 /",
"YSOs based on IR excess, with WISE colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5, closer than",
"can remove the join with Catalog in all the cartons # since catalogid",
"catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_variable_boss ;\") output = cursor.fetchall() for i",
"2MASS # # table catalogdb.mipsgal # Foreign-key constraints: # \"twomass_name_fk\" FOREIGN KEY (twomass_name)",
"13) Implementation: (in sagitta | in zari18pms) & h<13 lead contact:<NAME> \"\"\" #",
"None # instrument is set in post_process() cadence = None # cadence is",
"catalog_to_tic_v8 ct on ct.target_id = tic.id where m.hmag < 13 and (m.mag_8_0 -",
"True, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp * 2.5",
"before where() as S2 query. def build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid,",
"(optional): Implementation: age<7.5 and rp<15.5 Comments: Split from Cluster to request BOSS observations,",
"MIPSGAL.mag_24, MIPSGAL.hmag, Gaia_DR2.parallax, MIPSGAL.glon, MIPSGAL.glat) .join(TwoMassPSC, on=(MIPSGAL.twomass_name == TwoMassPSC.designation)) .join(TIC_v8, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))",
"update sandbox.temp_mwm_yso_variable_boss \" + \" set cadence = '\" + current_cadence + \"'\"",
"(http://www.opensource.org/licenses/BSD-3-Clause) import peewee from sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL, AllWise, Catalog, CatalogToTIC_v8, Gaia_DR2, Sagitta, TIC_v8,",
"CatalogToTIC_v8.best >> True, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp",
"Additional source catalogs needed: mipsgal Additional cross-matching needed: the table has xmatch included",
"without gaia detection, colors J-H>0,5, W1-W2>0.5, W2-W3>1, W3-W4>1.5, and relates (W3-W4)>(W1-W2)*0.5+1.1 (should have",
"of 2MASS # # table catalogdb.mipsgal # Foreign-key constraints: # \"twomass_name_fk\" FOREIGN KEY",
"query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m,",
"power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\"",
"> (AllWise.w1mpro - AllWise.w2mpro) * 0.8 + 1.1)) if query_region: query = (query",
"REFERENCES gaia_dr2_source(source_id) def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'),",
"is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set instrument =",
"H \"\"\" name = 'mwm_yso_cluster_boss' category = 'science' instrument = None # instrument",
"- OB APOGEE Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_apogee old class name: MWM_YSO_OB_Carton",
"-0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 \"\"\" name",
"+ \"'\" \" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton):",
"'apogee_bright_3x1' Pseudo SQL (optional): Implementation: Hmag<13 and _8_0_-_24_>2.5 and (parallax<0.2 or parallax is",
"join via TIC and catalog_to_tic. # # mipsgal is a subset of 2MASS",
"2MASS is a subset of TIC_v8 # Gaia_DR2 is a subset of TIC_v8",
"one): Pseudo SQL (optional): Implementation: age<7.5 and h<13 \"\"\" name = 'mwm_yso_cluster_apogee' category",
"based on RP instead of H \"\"\" name = 'mwm_yso_ob_boss' category = 'science'",
"set in post_process() program = 'mwm_yso' mapper = 'MWM' priority = 2700 def",
"old shorthand name: mwm_yso_cmz Simplified Description of selection criteria: selection of sources in",
"and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 Comments: Split from",
"target_selection.exceptions import TargetSelectionError # See catalog.py for the name of peewee model names",
"str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \"",
"needed: 2mass+allwise, gaia (allow sources that lack gaia xmatch) Additional cross-matching needed: Note:",
"current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_ob_boss ' + 'post_process():",
"in zari18pms) & rp<15.5 lead contact:<NAME> \"\"\" # peewee Model name ---> postgres",
"13 and ct.best is true; Note you only need one left outer join",
"\" sandbox.temp_mwm_yso_pms_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0]",
"None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set cadence = '\" +",
"W1-W2>0.25, W2-W3>0.5, W3-W4>1.5, closer than parallax>0.3, and brighter than H<13 (should have ~21.5K",
"- m.mag_24) > 2.5 and (g.parallax < 0.2 or g.parallax is null) and",
"w1mpro-w2mpro>0.5 and w2mpro-w3mpro>1 and w3mpro-w4mpro>1.5 and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\" name = 'mwm_yso_embedded_apogee' category =",
"all the targets should be within 5 deg of the plane+ few sources",
"name of peewee model names corresponding # to postgres table names: # https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py",
"name: mwm_yso_cluster Simplified Description of selection criteria: Selecting the clustered sources from the",
"0.50, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, Gaia_DR2.parallax > 0.3)) # Gaia_DR2 pweewee model",
"2.2, Gaia_DR2.parallax > 0.3)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec,",
"variables). Shorthand name: mwm_yso_variable_apogee old class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified",
"cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_variable_boss ;\") output",
"== TwoMassPSC.designation)) .join(TIC_v8, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id == TIC_v8.gaia_int)) .switch(TIC_v8) .join(CatalogToTIC_v8,",
"- Disk BOSS (IR excess). Shorthand name: mwm_yso_disk_boss old class name: MWM_YSO_S1_Carton old",
"None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set instrument = '\" +",
"+ \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" +",
"J, H, K, W1, W2, W3, W4 cadence options for these targets (list",
"on g.source_id = tic.gaia_int join catalog_to_tic_v8 ct on ct.target_id = tic.id where m.hmag",
"based on the text. # In words: # all the targets should be",
"and CatalogToTIC_v8 # Catalog.catalogid == CatalogToTIC_v8.catalogid # We can remove the join with",
"above so we should not get here. current_instrument = None current_cadence = None",
"class MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs - OB BOSS Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_boss old",
"Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1) <",
"MIPSGAL.mag_24) > 2.5, (Gaia_DR2.parallax < 0.2) | (Gaia_DR2.parallax >> None))) if query_region: query",
"and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 Comments: Split from mwm_yso_ob to",
"age<7.5 dex and brighter than H<13 mag. (should have ~45.5K sources) Wiki page:",
"2mass id, allwise id, G, BP, RP, J, H, K, W1, W2, W3,",
"program = 'mwm_yso' mapper = 'MWM' priority = 2700 # Above implementation has",
"| boss_bright_6x1 if RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \"",
"= \" + str(current_catalogid) + \";\") class MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence,",
"\"'\" \" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs",
"= 'BOSS' current_cadence = 'bright_4x1' elif(current_rp < 15.29): current_instrument = 'BOSS' current_cadence =",
"cadence = 'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority = 2700 #",
"SQL (optional): Implementation: phot_rp_mean_mag<15.5 and phot_g_mean_mag < 18.5 and h_m <13 and parallax",
"= 'mwm_yso_ob_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program =",
"- Nebula APOGEE(optically invisible, WISE saturated). Shorthand name: mwm_yso_nebula_apogee old class name: MWM_YSO_S2_5_Carton",
"peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error",
"None))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2])))",
"all the TIC entries have a Gaia counterpart). Comments: Formerly mwm_yso_cmz, removed check",
".join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13))",
"(AllWise.w1mpro - AllWise.w2mpro) * 0.8 + 1.1)) if query_region: query = (query .join_from(CatalogToTIC_v8,",
"== Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.bp_rp >",
"13, (Gaia_DR2.phot_g_mean_mag > 18.5) | (Gaia_DR2.phot_g_mean_mag >> None), (AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.0,",
"TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5, MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24, MIPSGAL.hmag, Gaia_DR2.parallax, MIPSGAL.glon,",
"# REFERENCES gaia_dr2_source(source_id) def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'),",
"# l, b in Gaia_DR2 are gallong and gallat in TIC_v8. # We",
"mwm_yso_s1 to request BOSS observations, same color selection but assigning cadence and faint",
"= None, cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss",
"None)) | ((AllWise.w3mpro >> None) & (AllWise.w4mpro >> None) & ((AllWise.j_m_2mass - AllWise.h_m_2mass)",
"= 'MWM' priority = 2700 def build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid,",
"and w4mpro is null) or (w3mpro is null and w4mpro is null and",
"needed: Kounkel+20 clustered catalog Additional cross-matching needed: Return columns: Gaia id, 2mass id,",
"name = 'mwm_yso_embedded_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program",
"within 5 deg of the plane+ # few sources that can be #",
"\" + str(current_catalogid) + \";\") class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Central Molecular Zone APOGEE.",
"2mass id, j, h, k, 3.6, 4.8, 8.0, 24 mag cadence options for",
"TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC,",
"(Gaia_DR2.parallax < 0.2) | (Gaia_DR2.parallax >> None))) if query_region: query = (query .join_from(CatalogToTIC_v8,",
"SQL (optional): Implementation: rp<15.5 and bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18 and",
"or l>180) and b<-5 \"\"\" name = 'mwm_yso_nebula_apogee' category = 'science' instrument =",
"== version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax >",
"(in sagitta | in zari18pms) & h<13 lead contact:<NAME> \"\"\" # peewee Model",
"self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set instrument = '\" + current_instrument",
"Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3",
"if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional):",
"query = query1 | query2 if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra,",
"None current_cadence = None raise TargetSelectionError('error in mwm_yso_pms_boss ' + 'post_process(): ' +",
"mipsgal is a subset of 2MASS # mipsgal can be joined to twomass_psc",
"current_instrument = 'BOSS' current_cadence = 'bright_3x1' elif(current_rp < 15.075): current_instrument = 'BOSS' current_cadence",
"query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Nebula APOGEE(optically invisible, WISE",
"+ \" sandbox.temp_mwm_yso_pms_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid =",
"None current_cadence = None raise TargetSelectionError('error in mwm_yso_disk_boss ' + 'post_process(): ' +",
"values of TIC_v8.plx (for non-null entries) are not the same as # values",
"= 2700 def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'),",
"+ \";\") class MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs - OB APOGEE Upper (pre-)Main Sequence. Shorthand name:",
"four statements below are equivalent. (l> 358 or l< 2) and b between",
"YSOs - Pre-main sequence, BOSS Shorthand name: mwm_yso_pms_boss Comments: New, Split from PMS",
"ct.target_id = tic.id where m.hmag < 13 and (m.mag_8_0 - m.mag_24) > 2.5",
"MIPSGAL.mag_4_5, MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24, MIPSGAL.hmag, Gaia_DR2.parallax, MIPSGAL.glon, MIPSGAL.glat) .join(TwoMassPSC, on=(MIPSGAL.twomass_name == TwoMassPSC.designation)) .join(TIC_v8,",
"2.5 - 1 < Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1),",
"Pseudo SQL (optional): Implementation: h_m<13 and (phot_g_mean_mag>18.5 or phot_g_mean_mag is null) and j_m-h_m>1",
"YSOs, brighter than H<15, saturated (blank) W4 with W2-W3>4, or saturated W3 and",
"5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /",
"check added to the previous selection \"\"\" name = 'mwm_yso_variable_boss' category = 'science'",
"> 358 or m.glon < 2) and (m.glat > -1 and m.glat <",
"BOSS observations, same color selection but assigning cadence and faint limit for carton",
"id, G, BP, RP, J, H, K, W1, W2, W3, W4,parallax cadence options",
"more than one): cadence options for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1",
"name = 'mwm_yso_pms_boss' category = 'science' instrument = None # instrument is set",
"Shorthand name: mwm_yso_pms_apogee Comments: New Simplified Description of selection criteria: Selecting the clustered",
"if RP<15.5 Pseudo SQL (optional): Implementation: age<7.5 and rp<15.5 Comments: Split from Cluster",
"options for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075 | boss_bright_5x1",
"same color selection but assigning cadence and faint limit for carton based on",
"on the plane of the sky: all the targets should be within 5",
"Cluster to request BOSS observations, assigning cadence and faint limit for carton based",
"old class name: MWM_YSO_CMZ_Carton old shorthand name: mwm_yso_cmz Simplified Description of selection criteria:",
"# Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_apogee'",
"query class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Cluster APOGEE Catalog Shorthand name: mwm_yso_cluster_apogee old class",
"Simplified Description of selection criteria: selection of YSOs based on IR excess, with",
"& ((AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.1)), ((Gaia_DR2.b > -5) & (Gaia_DR2.b < 5))",
"class MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs - Variable BOSS (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_boss",
"(with var_x defined as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have relations in variability of var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95, and",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # | is for",
"is null) and j_m-h_m>1 and h_m-ks_m>0.5 and w1mpro-w2mpro>0.5 and w2mpro-w3mpro>1 and w3mpro-w4mpro>1.5 and",
"and w4mpro is null and j_m-h_m>1.1) and (b>-5 or l>180) and b<-5 \"\"\"",
"b is glat (galactic latitude) All four statements below are equivalent. (l> 358",
"TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id ==",
"of selection criteria: Selecting the clustered sources from the catalog of clustered structures,",
"# to postgres table names: # https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Disk APOGEE",
"Gaia_DR2.bp_rp > 1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > 0.02,",
"RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and",
"the clustered sources from the catalog of clustered structures, with age<7.5 dex and",
"Comments: New, Split from PMS Simplified Description of selection criteria: Selecting the clustered",
"get here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_ob_boss '",
"5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp > 1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error",
"1.6 * Gaia_DR2.bp_rp - 2.2, Gaia_DR2.parallax > 0.3)) if query_region: query = (query",
"8.0-24>2.5, and have parallax<0.2 or lack a Gaia xmatch. (should have ~3.2K sources)",
"K, parallax cadence options for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if",
"gaia (allow sources that lack gaia xmatch) Additional cross-matching needed: Note: Using the",
"Gaia_DR2.phot_bp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error *",
"in Gaia_DR2 are gallong and gallat in TIC_v8. # We are using the",
"13 and (m.mag_8_0 - m.mag_24) > 2.5 and (g.parallax < 0.2 or g.parallax",
"FOREIGN KEY (source_id) # REFERENCES gaia_dr2_source(source_id) def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8",
"name: mwm_yso_s2_5 Simplified Description of selection criteria: selection of YSOs, brighter than H<15,",
"phot_g_mean_mag < 18.5 and h_m <13 and parallax >0.3 and bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1)",
"\" update sandbox.temp_mwm_yso_disk_boss \" + \" set cadence = '\" + current_cadence +",
"covered above so we should not get here. current_instrument = None current_cadence =",
">> True, TwoMassPSC.h_m < 13, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag",
"current_instrument = 'BOSS' current_cadence = 'bright_4x1' elif(current_rp < 15.29): current_instrument = 'BOSS' current_cadence",
"clause # and (b>-5 or l>180) and b<-5 # Replace (b>-5 or l>180)",
"subset of Gaia_DR2 # Gaia_DR2 is not a subset of 2MASS # #",
"= 'science' instrument = None # instrument is set in post_process() cadence =",
"Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, allwise Additional cross-matching needed: Return",
"Implementation: phot_rp_mean_mag<15.5 and phot_g_mean_mag < 18.5 and h_m <13 and parallax >0.3 and",
"single target will receive more than one): 'apogee_bright_3x1' Pseudo SQL (optional): Implementation: Hmag<13",
"name = 'mwm_yso_pms_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program",
"\";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \"",
"Split from Cluster to request BOSS observations, assigning cadence and faint limit for",
"not a subset of Gaia_DR2 # Gaia_DR2 is not a subset of 2MASS",
"deg of the plane+ few sources that can be located further south of",
"mapper = 'MWM' priority = 2700 def build_query(self, version_id, query_region=None): query = (AllWise",
"MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs - Variable BOSS (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_boss old",
"sandbox.temp_mwm_yso_variable_boss \" + \" set instrument = '\" + current_instrument + \"'\" \"",
"query_region[2]))) return query class MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs - Disk BOSS (IR excess). Shorthand name:",
"on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.bp_rp",
"these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29",
"YSO_Clustering.j, YSO_Clustering.h, YSO_Clustering.k, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .join(YSO_Clustering,",
"> 0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > 0.02)) if",
"allwise id, G, BP, RP, J, H, K, W1, W2, W3, W4 cadence",
"+ \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" +",
"j_m-h_m>1.1) and (b>-5 or l>180) and b<-5 \"\"\" name = 'mwm_yso_nebula_apogee' category =",
"\" update sandbox.temp_mwm_yso_variable_boss \" + \" set cadence = '\" + current_cadence +",
"and brighter than rp<15.5 mag. Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20",
"W2, W3, W4 cadence options for these targets (list all options, even though",
"Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query def post_process(self, model): \"\"\" cadence",
"BP, RP, J, H, K, parallax cadence options for these targets (list all",
"Gaia since # TIC propagates the coordinates back to epoch 2000.0 # (b>-5",
"only need one left outer join between TIC and Gaia (all MIPSGAL targets",
"sources from the catalog of vetted pre-main sequence stars Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional",
"in zari18pms) & h<13 lead contact:<NAME> \"\"\" # peewee Model name ---> postgres",
"& (MIPSGAL.glat < 1), \"\"\" name = 'mwm_yso_cmz_apogee' category = 'science' instrument =",
"sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and",
"plane of the sky: all the targets should be within 5 deg of",
"5)) | ((Gaia_DR2.b < -5) & (Gaia_DR2.l > 180)) | ((Gaia_DR2.b >> None)",
"'mwm_yso_variable_boss' category = 'science' instrument = None # instrument is set in post_process()",
"be within 5 deg of the plane+ # few sources that can be",
"name: mwm_yso_pms_apogee Comments: New Simplified Description of selection criteria: Selecting the clustered sources",
"Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Cluster",
"designation join tic_v8 tic on tic.twomass_psc = t.designation left outer join gaia_dr2_source g",
"Implementation: phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 Comments: Split from",
"Gaia_DR2.source_id)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m",
"single target will receive more than one): cadence options for these targets: boss_bright_3x1",
"Sequence. Shorthand name: mwm_yso_ob_apogee old class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified",
"Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j, YSO_Clustering.h, YSO_Clustering.k, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))",
"and h_m <13 and parallax >0.3 and bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and bp_rp*2.5-1 <",
"TwoMassPSC.h_m < 13)) # join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'),",
"on ct.target_id = tic.id where m.hmag < 13 and (m.mag_8_0 - m.mag_24) >",
"peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.95), peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error)",
"== version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.25,",
"== Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag",
"receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5",
"old class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified Description of selection criteria:",
"Sagitta, TIC_v8, TwoMassPSC, YSO_Clustering, Zari18pms) from target_selection.cartons import BaseCarton from target_selection.exceptions import TargetSelectionError",
">> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # | is for peewee SQL union query",
"peewee SQL union query = query1 | query2 if query_region: query = (query",
"if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set",
"RP, J, H, K, W1, W2, W3, W4 cadence options for these targets",
"entries) are not the same as # values of Gaia_DR2.parallax. # Hence, in",
"mwm_yso_s2_5 Simplified Description of selection criteria: selection of YSOs, brighter than H<15, saturated",
"Comments: Split from mwm_yso_s1 to request BOSS observations, same color selection but assigning",
"Description of selection criteria: selection of YSOs, brighter than H<13, fainter than G>15",
"None # cadence is set in post_process() program = 'mwm_yso' mapper = 'MWM'",
"even though no single target will receive more than one): boss_bright_3x1 if RP<14.76",
"(query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs",
"and log10(var_bp)*5+11<M_BP, in which M_x is the absolute mag (should have ~52.7K sources)",
"Simplified Description of selection criteria: Selecting the OB stars at the tip of",
"center, (MIPSGAL.glon > 358) | (MIPSGAL.glon < 2), (MIPSGAL.glat > -1) & (MIPSGAL.glat",
"AllWise.w2mpro) > 0.50, (AllWise.w2mpro - AllWise.w3mpro) > 1.00, (AllWise.w3mpro - AllWise.w4mpro) > 1.50,",
"query = (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5,",
"phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 Comments: Split from mwm_yso_ob to request BOSS observations,",
"# instrument is set in post_process() cadence = None # cadence is set",
"@License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import peewee from sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL, AllWise, Catalog, CatalogToTIC_v8,",
"return query class MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs - Cluster BOSS Catalog Shorthand name: mwm_yso_cluster_boss old",
"of selection criteria: selection of YSOs based on IR excess, with WISE colors",
"and 1 degree in b from the galactic center, (MIPSGAL.glon > 358) |",
"# Hence, in the above query, we cannot use TIC_v8.plx instead # of",
"request BOSS observations, RP magnitude check added to the previous selection \"\"\" name",
"Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_boss' category = 'science'",
"All cases should be covered above so we should not get here. current_instrument",
"| boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Implementation:",
"IR excess, with WISE colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5, closer than parallax>0.3, and brighter",
"can be # located further south of the plane if l>180 # Hence:",
"page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: mipsgal Additional cross-matching needed: the table has",
"== Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # |",
"stars, with BP-RP>13, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G, requiring variability in g,bp,rp>0.02 (with var_x defined as",
"= (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5, MIPSGAL.mag_5_8,",
"<reponame>sdss/target_selection #!/usr/bin/env python # -*- coding: utf-8 -*- # # @Author: <NAME> (<EMAIL>)",
"Gaia_DR2, Sagitta, TIC_v8, TwoMassPSC, YSO_Clustering, Zari18pms) from target_selection.cartons import BaseCarton from target_selection.exceptions import",
"had 2mass photometry, and then the resulting selection was crossmatched against against Gaia",
"stars at the tip of the main sequence, brighter than H<13, G<18 mag,",
"the main sequence, brighter than H<13, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1,",
"18, Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1) < 1.6 *",
"at the tip of the main sequence, brighter than rp<15.5, G<18 mag, closer",
"\"\"\" name = 'mwm_yso_ob_boss' category = 'science' instrument = None # instrument is",
"= 13 and ct.best is true; Note you only need one left outer",
"the targets should be within 5 deg of the plane+ few sources that",
"(should have ~8.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, gaia",
"based on IR excess, with WISE colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5, closer than parallax>0.3,",
"all options, even though no single target will receive more than one): 'apogee_bright_3x1'",
"YSO_Clustering.k, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id ==",
"from the catalog of clustered structures, with age<7.5 dex and brighter than H<13",
"boss_bright_6x1 if RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" +",
"sandbox.temp_mwm_yso_disk_boss \" + \" set cadence = '\" + current_cadence + \"'\" \"",
"= 'MWM' priority = 2700 # Above implementation has below clause # and",
"= t.designation left outer join gaia_dr2_source g on g.source_id = tic.gaia_int join catalog_to_tic_v8",
"\" sandbox.temp_mwm_yso_ob_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0]",
"more than one): Pseudo SQL (optional): Implementation: h_m<13 and (phot_g_mean_mag>18.5 or phot_g_mean_mag is",
"have color 8.0-24>2.5, and have parallax<0.2 or lack a Gaia xmatch. (should have",
"== TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms,",
"sandbox.temp_mwm_yso_disk_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0] current_rp",
"/ Gaia_DR2.parallax) - 1) < 1.6 * Gaia_DR2.bp_rp - 2.2, Gaia_DR2.parallax > 0.3))",
"selection of YSOs, brighter than H<13, fainter than G>15 or without gaia detection,",
"Gaia_DR2.phot_rp_mean_mag < 15.5, YSO_Clustering.age < 7.5)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog)",
"Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, gaia Additional cross-matching needed: Return",
"mwm_yso_cmz Simplified Description of selection criteria: selection of sources in the central molecular",
"# values of Gaia_DR2.parallax. # Hence, in the above query, we cannot use",
"m.mag_24) > 2.5 and (g.parallax < 0.2 or g.parallax is null) and ct.version_id",
"None current_cadence = None raise TargetSelectionError('error in mwm_yso_cluster_boss ' + 'post_process(): ' +",
"& rp<15.5 lead contact:<NAME> \"\"\" # peewee Model name ---> postgres table name",
"phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\" name = 'mwm_yso_variable_apogee'",
"the plane if l>180 # Hence: # ((b>-5) and (b<5)) or ((b<-5) and",
"ct on ct.target_id = tic.id where m.hmag < 13 and (m.mag_8_0 - m.mag_24)",
"Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs - Cluster",
"id, G, BP, RP, J, H, K, parallax cadence options for these targets",
"H<13, have color 8.0-24>2.5, and have parallax<0.2 or lack a Gaia xmatch. (should",
"Comments: Split from mwm_yso_s3 to request BOSS observations, RP magnitude check added to",
"cadence and faint limit for carton based on RP instead of H \"\"\"",
"catalogs needed: mipsgal Additional cross-matching needed: the table has xmatch included Return columns:",
"peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error * 0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error,",
"Selecting the clustered sources from the catalog of vetted pre-main sequence stars Wiki",
"(optional): Implementation: rp<15.5 and bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1)",
"Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp * 2.5 + 2.5 > Gaia_DR2.phot_g_mean_mag - 5 *",
".switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id,",
"AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (AllWise.w1mpro - AllWise.w2mpro)",
"with W2-W3>4, or saturated W3 and W2, with J-H>1.1. Some contaminants from scanning",
"mwm_yso_variable_apogee old class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified Description of selection",
"For CMZ, the raw sql query would be: select ct.catalogid from mipsgal m",
"b between -1 and 1 (m.glon > 358 or m.glon < 2) and",
"CatalogToTIC_v8 # Catalog.catalogid == CatalogToTIC_v8.catalogid # We can remove the join with Catalog",
"TargetSelectionError('error in mwm_yso_ob_boss ' + 'post_process(): ' + 'instrument = None, cadence= None')",
"be within 5 deg of the plane+ few sources that can be located",
"* (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp * 2.5 - 1 < Gaia_DR2.phot_g_mean_mag",
".join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best",
"if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 \"\"\" cursor =",
"and h<13 \"\"\" name = 'mwm_yso_cluster_apogee' category = 'science' instrument = 'APOGEE' cadence",
".switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag <",
"'BOSS' current_cadence = 'bright_5x1' elif(current_rp < 15.5): current_instrument = 'BOSS' current_cadence = 'bright_6x1'",
"than H<13 mag. (should have ~45.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs",
"the values from Gaia since # TIC propagates the coordinates back to epoch",
"\" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs -",
"Gaia_DR2.bp_rp - 2.2, Gaia_DR2.parallax > 0.3)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog)",
".switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m <",
"gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_disk_boss ;\") output = cursor.fetchall() for i in",
"= 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority =",
"phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 \"\"\" name = 'mwm_yso_ob_apogee' category =",
"True, Gaia_DR2.phot_rp_mean_mag < 15.5, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag <",
"and all 2MASS have an entry in TIC, but not all the TIC",
"@Filename: mwm_yso.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import peewee from sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL,",
"Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs - Disk BOSS (IR",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.bp_rp > -0.2) &",
"version_id, query_region=None): # join with Sagitta query1 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'),",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, YSO_Clustering.h < 13, YSO_Clustering.age < 7.5)) if",
"class corresponds to # table catalogdb.gaia_dr2_source. # # All values of TIC_v8.plx (for",
"h, k, 3.6, 4.8, 8.0, 24 mag cadence options for these targets (list",
"'science' instrument = None # instrument is set in post_process() cadence = None",
"boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: age<7.5 and",
"relates (W3-W4)>(W1-W2)*0.5+1.1 (should have ~11.6K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed:",
"= None # cadence is set in post_process() program = 'mwm_yso' mapper =",
"Implementation: h_m<13 and (phot_g_mean_mag>18.5 or phot_g_mean_mag is null) and j_m-h_m>1 and h_m-ks_m>0.5 and",
"targets should be within 5 deg of the plane+ # few sources that",
"to request BOSS observations, assigning cadence and faint limit for carton based on",
"or lack a Gaia xmatch. (should have ~3.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional",
"here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_cluster_boss ' +",
"catalogid is completely unique (even across different version_id) # so the join with",
"'bright_3x1' elif(current_rp < 15.075): current_instrument = 'BOSS' current_cadence = 'bright_4x1' elif(current_rp < 15.29):",
"Shorthand name: mwm_yso_variable_apogee old class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified Description",
"M_G<(BP-RP)*1.6-2.2 (should have ~8.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass,",
"and (parallax<0.2 or parallax is null) For CMZ, the raw sql query would",
"need one left outer join between TIC and Gaia (all MIPSGAL targets have",
"W1, W2, W3, W4 cadence options for these targets (list all options, even",
"Additional cross-matching needed: the table has xmatch included Return columns: mipsgal id, 2mass",
"id, allwise id, G, BP, RP, J, H, K, W1, W2, W3, W4",
">> True, TwoMassPSC.h_m < 13)) # join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid,",
"\"\"\" name = 'mwm_yso_cmz_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1'",
"(Gaia_DR2.l >> None)))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0],",
"where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Embedded",
"= 'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority = 2700 # yso_clustering",
"class MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs - Cluster BOSS Catalog Shorthand name: mwm_yso_cluster_boss old class name:",
".where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query def post_process(self, model): \"\"\" cadence options",
"and w1mpro-w2mpro>0.5 and w2mpro-w3mpro>1 and w3mpro-w4mpro>1.5 and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\" name = 'mwm_yso_embedded_apogee' category",
"closer than parallax>0.3. Filter on the position of the HR diagram to select",
"return query class MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Variable APOGEE (pre-main sequence optical variables). Shorthand",
"phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 Comments: Split from mwm_yso_s1",
"and (w2mpro-w3mpro>4 and w4mpro is null) or (w3mpro is null and w4mpro is",
"~45.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20 clustered catalog Additional",
"\" where catalogid = \" + str(current_catalogid) + \";\") if current_cadence is not",
"with Catalog doesn't give us anything extra and it's a costly join. def",
"sagitta | in zari18pms) & h<13 lead contact:<NAME> \"\"\" # peewee Model name",
"shorthand name: mwm_yso_s3 Simplified Description of selection criteria: selection of YSOs brighter than",
"Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_apogee' category",
"current_instrument = 'BOSS' current_cadence = 'bright_5x1' elif(current_rp < 15.5): current_instrument = 'BOSS' current_cadence",
"elif(current_rp < 15.29): current_instrument = 'BOSS' current_cadence = 'bright_5x1' elif(current_rp < 15.5): current_instrument",
"bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3",
"(in sagitta | in zari18pms) & rp<15.5 lead contact:<NAME> \"\"\" # peewee Model",
"join between TIC and Gaia (all MIPSGAL targets have a counterpart in 2MASS,",
"options for these targets (list all options, even though no single target will",
"l>180) and b<-5 \"\"\" name = 'mwm_yso_nebula_apogee' category = 'science' instrument = 'APOGEE'",
"# Foreign-key constraints: # \"twomass_name_fk\" FOREIGN KEY (twomass_name) # REFERENCES twomass_psc(designation) # #",
"import peewee from sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL, AllWise, Catalog, CatalogToTIC_v8, Gaia_DR2, Sagitta, TIC_v8, TwoMassPSC,",
"no single target will receive more than one): Pseudo SQL (optional): Implementation: phot_g_mean_mag",
"catalog that had 2mass photometry, and then the resulting selection was crossmatched against",
"will receive more than one): Pseudo SQL (optional): Implementation: phot_g_mean_mag < 18.5 and",
"str(current_catalogid) + \";\") class MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, APOGEE Shorthand name:",
"on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (Gaia_DR2.bp_rp",
"version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # | is for peewee SQL",
"mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should have ~8.7K sources) Wiki",
"all options, even though no single target will receive more than one): Pseudo",
"< 18.5 and h_m <13 and parallax >0.3 and bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and",
"source catalogs needed: Kounkel+20 clustered catalog Additional cross-matching needed: Return columns: Gaia id,",
"J, H, K, parallax cadence options for these targets: boss_bright_3x1 if RP<14.76 |",
"version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.phot_g_mean_mag > 18.5) | (Gaia_DR2.phot_g_mean_mag >>",
"unique (even across different version_id) # so the join with Catalog doesn't give",
"- Cluster APOGEE Catalog Shorthand name: mwm_yso_cluster_apogee old class name: MWM_YSO_Cluster_Carton old shorthand",
"bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments: Split from mwm_yso_s3 to request",
"of Gaia_DR2 # Gaia_DR2 is not a subset of 2MASS # # table",
"name: MWM_YSO_S2_Carton old shorthand name: mwm_yso_s2 Simplified Description of selection criteria: selection of",
"/ Gaia_DR2.phot_g_mean_flux_over_error * 0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.75),",
"= None raise TargetSelectionError('error in mwm_yso_cluster_boss ' + 'post_process(): ' + 'instrument =",
"AllWise.k_m_2mass) > 0.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.50, (AllWise.w2mpro - AllWise.w3mpro) > 1.00,",
"(peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp * 2.5 - 1 < Gaia_DR2.phot_g_mean_mag -",
"Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_boss' category = 'science' instrument = None #",
"Return columns: Gaia id, 2mass id, allwise id, G, BP, RP, J, H,",
"parallax cadence options for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075",
"== Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # join",
"and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and",
"None current_cadence = None raise TargetSelectionError('error in mwm_yso_ob_boss ' + 'post_process(): ' +",
"g on g.source_id = tic.gaia_int join catalog_to_tic_v8 ct on ct.target_id = tic.id where",
"Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id ==",
"ct.best is true; Note you only need one left outer join between TIC",
".join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5,",
"YSO_Clustering, Zari18pms) from target_selection.cartons import BaseCarton from target_selection.exceptions import TargetSelectionError # See catalog.py",
"< 1) and Sources are within 2 degrees in l and 1 degree",
"on the position of the HR diagram to select cool pre-main sequence stars,",
"name ---> postgres table name # Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta'",
"= None, cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss",
".join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs -",
"or m.glon < 2) and (m.glat > -1 and m.glat < 1) and",
"you only need one left outer join between TIC and Gaia (all MIPSGAL",
"mwm_yso_cluster Simplified Description of selection criteria: Selecting the clustered sources from the catalog",
"name = 'mwm_yso_ob_boss' category = 'science' instrument = None # instrument is set",
"OB stars at the tip of the main sequence, brighter than H<13, G<18",
"shorthand name: mwm_yso_s2 Simplified Description of selection criteria: selection of YSOs, brighter than",
"TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(TIC_v8.allwise ==",
"(W3-W4)>(W1-W2)*0.5+1.1 (should have ~11.6K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass+allwise,",
"latitude) All four statements below are equivalent. (l> 358 or l< 2) and",
"excess, with WISE colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5, closer than parallax>0.3, and brighter than",
"var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95, and log10(var_bp)*5+11<M_BP, in which M_x is the absolute mag (should have",
"current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set cadence",
"non-null entries) are not the same as # values of Gaia_DR2.parallax. # Hence,",
"W4 cadence options for these targets (list all options, even though no single",
"have ~8.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, gaia Additional",
"query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs - OB BOSS Upper (pre-)Main",
"Gaia_DR2.phot_rp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.95), peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error) * 5",
"RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: rp<15.5 and bp_rp between",
"brighter than rp<15.5, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should",
"class name: MWM_YSO_CMZ_Carton old shorthand name: mwm_yso_cmz Simplified Description of selection criteria: selection",
"MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, BOSS Shorthand name: mwm_yso_pms_boss Comments: New, Split",
"(AllWise.w3mpro - AllWise.w4mpro) > 1.50, Gaia_DR2.parallax > 0.3)) # Gaia_DR2 pweewee model class",
"True, TwoMassPSC.h_m < 13, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro) >",
"model names corresponding # to postgres table names: # https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs",
"defined as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have relations in variability of var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95, and log10(var_bp)*5+11<M_BP, in",
"between to Catalog and CatalogToTIC_v8 # Catalog.catalogid == CatalogToTIC_v8.catalogid # We can remove",
"== AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (AllWise.w1mpro -",
"H, K, W1, W2, W3, W4,parallax cadence options for these targets (list all",
"-1 and m.glat < 1) and Sources are within 2 degrees in l",
"more than one): apogee_bright_3x1 (for 7 < H < 13) Implementation: (in sagitta",
"of TIC_v8.plx (for non-null entries) are not the same as # values of",
"# ((b>-5) and (b<5)) or ((b<-5) and (l > 180)) # l, b",
"\"\"\"YSOs - Disk BOSS (IR excess). Shorthand name: mwm_yso_disk_boss old class name: MWM_YSO_S1_Carton",
"the absolute mag (should have ~52.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs",
"< peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /",
"dex and brighter than H<13 mag. (should have ~45.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function",
"class MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs - Disk BOSS (IR excess). Shorthand name: mwm_yso_disk_boss old class",
"b in Gaia_DR2 are gallong and gallat in TIC_v8. # We are using",
"0.3)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2])))",
"is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set cadence =",
"on=(TIC_v8.allwise == AllWise.designation)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))",
"Gaia_DR2.parallax) - 1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error",
"table catalogdb.gaia_dr2_source. # # All values of TIC_v8.plx (for non-null entries) are not",
"- AllWise.w3mpro) > 0.50, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, Gaia_DR2.parallax > 0.3)) #",
"Gaia_DR2.phot_rp_mean_flux_over_error > 0.02)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0],",
"None, cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \"",
"sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3",
"# table catalogdb.gaia_dr2_source. # # All values of TIC_v8.plx (for non-null entries) are",
"and w2mpro-w3mpro>1 and w3mpro-w4mpro>1.5 and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\" name = 'mwm_yso_embedded_apogee' category = 'science'",
"w3mpro-w4mpro>1.5 and parallax>0.3 \"\"\" name = 'mwm_yso_disk_apogee' category = 'science' instrument = 'APOGEE'",
"mwm_yso_s3 Simplified Description of selection criteria: selection of YSOs brighter than H<13, closer",
"= (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_BOSS_Carton(BaseCarton):",
"import (MIPSGAL, AllWise, Catalog, CatalogToTIC_v8, Gaia_DR2, Sagitta, TIC_v8, TwoMassPSC, YSO_Clustering, Zari18pms) from target_selection.cartons",
"current_catalogid = output[i][0] current_rp = output[i][1] if(current_rp < 14.76): current_instrument = 'BOSS' current_cadence",
"a subset of gaia and # can be joined to gaia_dr2_source via source_id.",
"\";\") class MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs - OB APOGEE Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_apogee",
"g.source_id = tic.gaia_int join catalog_to_tic_v8 ct on ct.target_id = tic.id where m.hmag <",
"sequence optical variables). Shorthand name: mwm_yso_variable_apogee old class name: MWM_YSO_S3_Carton old shorthand name:",
"Implementation: (in sagitta | in zari18pms) & h<13 lead contact:<NAME> \"\"\" # peewee",
"Shorthand name: mwm_yso_nebula_apogee old class name: MWM_YSO_S2_5_Carton old shorthand name: mwm_yso_s2_5 Simplified Description",
"of the main sequence, brighter than H<13, G<18 mag, closer than parallax>0.3, color",
"be: select ct.catalogid from mipsgal m join twomass_psc t on twomass_name = designation",
".switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag <",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, MIPSGAL.hmag < 13, (MIPSGAL.mag_8_0 - MIPSGAL.mag_24) >",
"b<-5 # S2_5 query below has the same part before where() as S2",
"# peewee Model name ---> postgres table name # Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' #",
"of clustered structures, with age<7.5 dex and brighter than rp<15.5 mag. Wiki page:",
"Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Nebula",
"def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'),",
"> 1.1)), ((Gaia_DR2.b > -5) & (Gaia_DR2.b < 5)) | ((Gaia_DR2.b < -5)",
"\" sandbox.temp_mwm_yso_variable_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0]",
"name: mwm_yso_variable_boss old class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified Description of",
"instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority",
"postgres table names: # https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Disk APOGEE (IR excess).",
"selection but assigning cadence and faint limit for carton based on RP instead",
"subset of 2MASS # # table catalogdb.mipsgal # Foreign-key constraints: # \"twomass_name_fk\" FOREIGN",
"limit for carton based on RP instead of H \"\"\" name = 'mwm_yso_cluster_boss'",
"(CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m,",
"bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and",
"RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_ob_boss",
"\" + str(current_catalogid) + \";\") class MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs - OB APOGEE Upper (pre-)Main",
"+ current_instrument + \"'\" \" where catalogid = \" + str(current_catalogid) + \";\")",
"gaia and # can be joined to gaia_dr2_source via source_id. # # table",
"\"'\" \" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs",
"version_id, query_region=None): query = (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m,",
"(all MIPSGAL targets have a counterpart in 2MASS, and all 2MASS have an",
"version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro",
"from the catalog of clustered structures, with age<7.5 dex and brighter than rp<15.5",
"receive more than one): cadence options for these targets: boss_bright_3x1 if RP<14.76 |",
"and w3mpro-w4mpro>1.5 and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\" name = 'mwm_yso_embedded_apogee' category = 'science' instrument =",
"RP<15.5 Pseudo SQL (optional): Implementation: rp<15.5 and bp_rp between -0.2 and 1.1 and",
"the OB stars at the tip of the main sequence, brighter than H<13,",
"j, h, k, 3.6, 4.8, 8.0, 24 mag cadence options for these targets",
"= None, cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss",
"- Variable BOSS (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_boss old class name:",
"+ str(current_catalogid) + \";\") class MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, APOGEE Shorthand",
"= 'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority = 2700 def build_query(self,",
"# Gaia_DR2 is not a subset of 2MASS # # table catalogdb.mipsgal #",
"shorthand name: mwm_yso_ob Simplified Description of selection criteria: Selecting the OB stars at",
"the text. # In words: # all the targets should be within 5",
"and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 Comments: Split from mwm_yso_ob to request BOSS",
"mwm_yso_s3 to request BOSS observations, RP magnitude check added to the previous selection",
"on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id ==",
"one): Pseudo SQL (optional): Implementation: h_m<13 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and",
"and (g.parallax < 0.2 or g.parallax is null) and ct.version_id = 13 and",
"carton based on RP instead of H \"\"\" name = 'mwm_yso_cluster_boss' category =",
"Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs - Variable BOSS (pre-main",
"boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: rp<15.5 and",
"from mipsgal m join twomass_psc t on twomass_name = designation join tic_v8 tic",
"TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (((AllWise.w2mpro - AllWise.w3mpro)",
"name: mwm_yso_embedded_apogee old class name: MWM_YSO_S2_Carton old shorthand name: mwm_yso_s2 Simplified Description of",
"True, TwoMassPSC.h_m < 13)) # join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,",
"few sources that can be located further south of the plane if l>180",
"of the sky: all the targets should be within 5 deg of the",
"peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best",
"> 0.02)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1],",
"(should have ~45.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20 clustered",
"cadence is set in post_process() program = 'mwm_yso' mapper = 'MWM' priority =",
"phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments: Split from mwm_yso_s3",
"with J-H>1.1. Some contaminants from scanning are filtered on the plane of the",
"Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))",
"4.8, 8.0, 24 mag cadence options for these targets (list all options, even",
"few sources that can be # located further south of the plane if",
"spitzer fluxes from mipsgal. brighter than H<13, have color 8.0-24>2.5, and have parallax<0.2",
"TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, MIPSGAL.hmag < 13, (MIPSGAL.mag_8_0 - MIPSGAL.mag_24)",
"W1, W2, W3, W4,parallax cadence options for these targets (list all options, even",
"of peewee model names corresponding # to postgres table names: # https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class",
"query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query def",
"' + 'instrument = None, cadence= None') if current_instrument is not None: self.database.execute_sql(",
"= (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m,",
"on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, YSO_Clustering.h < 13, YSO_Clustering.age",
"rp<15.5 mag. Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20 clustered catalog Additional",
"< 13, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro) > 0.50, (AllWise.w3mpro",
"catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\" YSOs - Pre-main",
"Replace (b>-5 or l>180) and b<-5 as below based on the text. #",
"and have parallax<0.2 or lack a Gaia xmatch. (should have ~3.2K sources) Wiki",
"get here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_disk_boss '",
"receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and bp_rp between -0.2",
"on RP instead of H \"\"\" name = 'mwm_yso_disk_boss' category = 'science' instrument",
"Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Nebula APOGEE(optically invisible,",
"clustered catalog Additional cross-matching needed: Return columns: Gaia id, 2mass id, G, BP,",
"doesn't give us anything extra and it's a costly join. def build_query(self, version_id,",
".join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best",
"w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 Comments: Split from mwm_yso_s1 to request BOSS observations,",
".join(TIC_v8, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id == TIC_v8.gaia_int)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id ==",
"h<13 \"\"\" name = 'mwm_yso_cluster_apogee' category = 'science' instrument = 'APOGEE' cadence =",
"w4mpro is null) or (w3mpro is null and w4mpro is null and j_m-h_m>1.1)",
"(w2mpro-w3mpro>4 and w4mpro is null) or (w3mpro is null and w4mpro is null",
"# mipsgal is a subset of 2MASS # mipsgal can be joined to",
"shorthand name: mwm_yso_cluster Simplified Description of selection criteria: Selecting the clustered sources from",
"< 13 and (m.mag_8_0 - m.mag_24) > 2.5 and (g.parallax < 0.2 or",
"Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(TIC_v8.allwise",
"AllWise.w3mpro) > 0.50, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, Gaia_DR2.parallax > 0.3)) # Gaia_DR2",
"Embedded APOGEE (optically invisible). Shorthand name: mwm_yso_embedded_apogee old class name: MWM_YSO_S2_Carton old shorthand",
"(query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\"",
"that can be # located further south of the plane if l>180 #",
"CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro -",
"- 1 < Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs)",
"| (MIPSGAL.glon < 2), (MIPSGAL.glat > -1) & (MIPSGAL.glat < 1), \"\"\" name",
"of YSOs, brighter than H<15, saturated (blank) W4 with W2-W3>4, or saturated W3",
"1.50, Gaia_DR2.parallax > 0.3)) # Gaia_DR2 pweewee model class corresponds to # table",
"not get here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_variable_boss",
"(b>-5 or l>180) and b<-5 # S2_5 query below has the same part",
"current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set instrument",
"mipsgal m join twomass_psc t on twomass_name = designation join tic_v8 tic on",
"and ct.version_id = 13 and ct.best is true; Note you only need one",
".join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))",
"\"\"\"YSOs - Central Molecular Zone APOGEE. Shorthand name: mwm_yso_cmz_apogee old class name: MWM_YSO_CMZ_Carton",
"2.5, (Gaia_DR2.parallax < 0.2) | (Gaia_DR2.parallax >> None))) if query_region: query = (query",
"sequence optical variables). Shorthand name: mwm_yso_variable_boss old class name: MWM_YSO_S3_Carton old shorthand name:",
"else: # All cases should be covered above so we should not get",
"- 1), Gaia_DR2.bp_rp > 1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error",
"one): Pseudo SQL (optional): Implementation: phot_g_mean_mag < 18.5 and h_m <13 and parallax",
"h_m<13 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 \"\"\" name = 'mwm_yso_disk_apogee'",
"MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Central Molecular Zone APOGEE. Shorthand name: mwm_yso_cmz_apogee old class name:",
"| ((AllWise.w3mpro >> None) & (AllWise.w4mpro >> None) & ((AllWise.j_m_2mass - AllWise.h_m_2mass) >",
"0.75*var_g<var_rp<var_g^0.95, and log10(var_bp)*5+11<M_BP, in which M_x is the absolute mag (should have ~52.7K",
"where m.hmag < 13 and (m.mag_8_0 - m.mag_24) > 2.5 and (g.parallax <",
"than one): Pseudo SQL (optional): Implementation: age<7.5 and h<13 \"\"\" name = 'mwm_yso_cluster_apogee'",
"lack a Gaia xmatch. (should have ~3.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source",
"query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs - Cluster BOSS Catalog Shorthand name:",
"MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Disk APOGEE (IR excess). Shorthand name: mwm_yso_disk_apogee old class name:",
"RP magnitude check added to the previous selection \"\"\" name = 'mwm_yso_variable_boss' category",
"has the same part before where() as S2 query. def build_query(self, version_id, query_region=None):",
"removed check on the position on the sky: Removed below condition. l is",
"can be joined to gaia_dr2_source via source_id. # # table catalogdb.yso_clustering # Foreign-key",
"< 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp * 2.5 + 2.5",
"'mwm_yso' mapper = 'MWM' priority = 2700 def build_query(self, version_id, query_region=None): # join",
"# Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_boss'",
"joined to gaia_dr2_source via source_id. # # table catalogdb.yso_clustering # Foreign-key constraints: #",
"is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set instrument =",
"Shorthand name: mwm_yso_embedded_apogee old class name: MWM_YSO_S2_Carton old shorthand name: mwm_yso_s2 Simplified Description",
"on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id == TIC_v8.gaia_int)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))",
"can be located further south of the plane if l>180 (should have ~1.2K",
"None, cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \"",
"TargetSelectionError('error in mwm_yso_disk_boss ' + 'post_process(): ' + 'instrument = None, cadence= None')",
"\"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_pms_boss ;\") output = cursor.fetchall() for",
"== Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m <",
"between -0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 Comments:",
"BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import peewee from sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL, AllWise, Catalog, CatalogToTIC_v8, Gaia_DR2,",
"# See catalog.py for the name of peewee model names corresponding # to",
".switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_g_mean_mag <",
"None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \"",
"Due to below, we do not need a between to Catalog and CatalogToTIC_v8",
"WISE colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5, closer than parallax>0.3, and brighter than H<13 (should",
"Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8,",
"age<7.5 dex and brighter than rp<15.5 mag. Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs",
"-1 and 1 (m.glon > 358 or m.glon < 2) and (m.glat >",
"< 13)) # join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'),",
"'MWM' priority = 2700 # yso_clustering is a subset of gaia and #",
"\" update sandbox.temp_mwm_yso_cluster_boss \" + \" set instrument = '\" + current_instrument +",
"south of the plane if l>180 # Hence: # ((b>-5) and (b<5)) or",
"and b between -1 and 1 (m.glon > 358 or m.glon < 2)",
"Gaia_DR2 is a subset of TIC_v8 # # 2MASS is not a subset",
"version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,",
"search radius. Return columns: Gaia id, 2mass id, allwise id, G, BP, RP,",
"(optional): Implementation: h_m<13 and (w2mpro-w3mpro>4 and w4mpro is null) or (w3mpro is null",
"'MWM' priority = 2700 def build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,",
"'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority = 2700 # yso_clustering is",
"boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL",
"Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass+allwise, gaia (allow sources that lack",
"we should not get here. current_instrument = None current_cadence = None raise TargetSelectionError('error",
"and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\" name = 'mwm_yso_variable_apogee' category = 'science' instrument =",
"Description of selection criteria: selection of YSOs, brighter than H<15, saturated (blank) W4",
"null) or (w3mpro is null and w4mpro is null and j_m-h_m>1.1) and (b>-5",
"criteria: Selecting the clustered sources from the catalog of vetted pre-main sequence stars",
"W3, W4,parallax cadence options for these targets (list all options, even though no",
"- Pre-main sequence, APOGEE Shorthand name: mwm_yso_pms_apogee Comments: New Simplified Description of selection",
"< 15.29): current_instrument = 'BOSS' current_cadence = 'bright_5x1' elif(current_rp < 15.5): current_instrument =",
"TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2,",
"one): Pseudo SQL (optional): Implementation: h_m<13 and (phot_g_mean_mag>18.5 or phot_g_mean_mag is null) and",
"sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\" name = 'mwm_yso_variable_apogee' category = 'science' instrument",
"where catalogid = \" + str(current_catalogid) + \";\") if current_cadence is not None:",
"and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\" name",
"on the sky: Removed below condition. l is glon (galactic longitude) b is",
"RP instead of H \"\"\" name = 'mwm_yso_disk_boss' category = 'science' instrument =",
"of 2MASS # 2MASS is a subset of TIC_v8 # Gaia_DR2 is a",
"KEY (source_id) # REFERENCES gaia_dr2_source(source_id) def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid,",
"in the central molecular zone based on spitzer fluxes from mipsgal. brighter than",
"< 15.075): current_instrument = 'BOSS' current_cadence = 'bright_4x1' elif(current_rp < 15.29): current_instrument =",
"TIC, but not all the TIC entries have a Gaia counterpart). Comments: Formerly",
"AllWise.w4mpro) > 1.50, (AllWise.w3mpro - AllWise.w4mpro) > (AllWise.w1mpro - AllWise.w2mpro) * 0.8 +",
"current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_cluster_boss ' + 'post_process():",
"< 1.6*bp_rp-2.2 and parallax>0.3 Comments: Split from mwm_yso_ob to request BOSS observations, assigning",
"2mass id, G, BP, RP, J, H, K, parallax cadence options for these",
"name: mwm_yso_ob Simplified Description of selection criteria: Selecting the OB stars at the",
"brighter than H<13, fainter than G>15 or without gaia detection, colors J-H>0,5, W1-W2>0.5,",
"catalogdb.mipsgal # Foreign-key constraints: # \"twomass_name_fk\" FOREIGN KEY (twomass_name) # REFERENCES twomass_psc(designation) #",
"bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75",
"+ \" sandbox.temp_mwm_yso_disk_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid =",
"though no single target will receive more than one): 'apogee_bright_3x1' Pseudo SQL (optional):",
"is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set instrument =",
"TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m",
"| in zari18pms) & rp<15.5 lead contact:<NAME> \"\"\" # peewee Model name --->",
"shorthand name: mwm_yso_s1 Simplified Description of selection criteria: selection of YSOs based on",
"Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int ==",
"/ Gaia_DR2.phot_rp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error * 0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error < peewee.fn.power(",
"> -5) & (Gaia_DR2.b < 5)) | ((Gaia_DR2.b < -5) & (Gaia_DR2.l >",
"in l and 1 degree in b from the galactic center, (MIPSGAL.glon >",
"and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments: Split from mwm_yso_s3 to",
"sandbox.temp_mwm_yso_variable_boss \" + \" set cadence = '\" + current_cadence + \"'\" \"",
"Comments: Split from Cluster to request BOSS observations, assigning cadence and faint limit",
"True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,",
"than parallax>0.3. Filter on the position of the HR diagram to select cool",
"1\" search radius. Return columns: Gaia id, 2mass id, allwise id, G, BP,",
"central molecular zone based on spitzer fluxes from mipsgal. brighter than H<13, have",
"brighter than H<13, closer than parallax>0.3. Filter on the position of the HR",
"than parallax>0.3, and brighter than H<13 (should have ~21.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function",
"TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_boss' category = 'science' instrument = None # instrument is",
"/ Gaia_DR2.phot_g_mean_flux_over_error, 0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.95), peewee.fn.log(",
"OB APOGEE Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_apogee old class name: MWM_YSO_OB_Carton old",
"between TIC and Gaia (all MIPSGAL targets have a counterpart in 2MASS, and",
"photometry, and then the resulting selection was crossmatched against against Gaia with 1\"",
"sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1)",
"needed: the table has xmatch included Return columns: mipsgal id, 2mass id, j,",
"'mwm_yso_pms_boss' category = 'science' instrument = None # instrument is set in post_process()",
"will receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and bp_rp between",
"of sources in the central molecular zone based on spitzer fluxes from mipsgal.",
"if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set",
"catalog.py for the name of peewee model names corresponding # to postgres table",
"if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Implementation: (in sagitta",
"TargetSelectionError('error in mwm_yso_variable_boss ' + 'post_process(): ' + 'instrument = None, cadence= None')",
"Simplified Description of selection criteria: selection of YSOs, brighter than H<13, fainter than",
"check on the position on the sky: Removed below condition. l is glon",
"to below, we do not need a between to Catalog and CatalogToTIC_v8 #",
"the sky: all the targets should be within 5 deg of the plane+",
"Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs - OB BOSS Upper",
"the coordinates back to epoch 2000.0 # (b>-5 or l>180) and b<-5 #",
"source catalogs needed: Gaia, 2mass, allwise Additional cross-matching needed: Note: Using the Gaia",
"5 deg of the plane+ few sources that can be located further south",
"< Gaia_DR2.phot_bp_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp > 1.3,",
"are not the same as # values of Gaia_DR2.parallax. # Hence, in the",
"back to epoch 2000.0 # (b>-5 or l>180) and b<-5 # S2_5 query",
"== TIC_v8.gaia_int)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True,",
"< 14.76): current_instrument = 'BOSS' current_cadence = 'bright_3x1' elif(current_rp < 15.075): current_instrument =",
"= \" + str(current_catalogid) + \";\") class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Central Molecular Zone",
"> phot_g_mean_mag-5*(log10(1000/parallax)-1) and bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and",
"# \"twomass_name_fk\" FOREIGN KEY (twomass_name) # REFERENCES twomass_psc(designation) # # Due to below,",
"'\" + current_cadence + \"'\" \" where catalogid = \" + str(current_catalogid) +",
"ct.version_id = 13 and ct.best is true; Note you only need one left",
"\"\"\"YSOs - Variable BOSS (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_boss old class",
"in mwm_yso_ob_boss ' + 'post_process(): ' + 'instrument = None, cadence= None') if",
"2mass, gaia Additional cross-matching needed: Return columns: Gaia id, 2mass id, G, BP,",
"< 15.5, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag",
"contaminants from scanning are filtered on the plane of the sky: all the",
"is not a subset of Gaia_DR2 # Gaia_DR2 is not a subset of",
"Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # join with",
"| boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: rp<15.5",
"a Gaia xmatch. (should have ~3.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs",
"def build_query(self, version_id, query_region=None): query = (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m,",
"a subset of TIC_v8 # # 2MASS is not a subset of Gaia_DR2",
"= '\" + current_instrument + \"'\" \" where catalogid = \" + str(current_catalogid)",
">> None), (AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.0, (AllWise.h_m_2mass - AllWise.k_m_2mass) > 0.5, (AllWise.w1mpro",
"cadence options for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075 |",
"query. def build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key,",
"entries have a Gaia counterpart). Comments: Formerly mwm_yso_cmz, removed check on the position",
"build_query(self, version_id, query_region=None): # join with Sagitta query1 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'),",
"if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set",
"Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20 clustered catalog Additional cross-matching needed:",
"Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # | is",
"K, parallax cadence options for these targets (list all options, even though no",
"+ \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" +",
"RP<14.76 | boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5",
"the clustered sources from the catalog of vetted pre-main sequence stars Wiki page:",
"from Gaia since # TIC propagates the coordinates back to epoch 2000.0 #",
"Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id ==",
"current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_disk_boss ' + 'post_process():",
"# Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_apogee' category =",
".join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13))",
".where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs - Variable BOSS",
"'mwm_yso' mapper = 'MWM' priority = 2700 # yso_clustering is a subset of",
"Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc",
"~21.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Gaia, 2mass, allwise Additional",
"based on RP instead of H \"\"\" name = 'mwm_yso_cluster_boss' category = 'science'",
"the catalog of vetted pre-main sequence stars Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs",
"class name: MWM_YSO_S2_Carton old shorthand name: mwm_yso_s2 Simplified Description of selection criteria: selection",
"query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j, YSO_Clustering.h,",
"that lack gaia xmatch) Additional cross-matching needed: Note: Using the Gaia xmatch somehow",
"\" + \" sandbox.temp_mwm_yso_cluster_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid",
"name: mwm_yso_cmz_apogee old class name: MWM_YSO_CMZ_Carton old shorthand name: mwm_yso_cmz Simplified Description of",
"catalogs needed: Kounkel+20 clustered catalog Additional cross-matching needed: Return columns: Gaia id, 2mass",
"= 'mwm_yso_disk_boss' category = 'science' instrument = None # instrument is set in",
"options, even though no single target will receive more than one): apogee_bright_3x1 (for",
"# We are using the values from Gaia since # TIC propagates the",
"query_region[2]))) return query class MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Variable APOGEE (pre-main sequence optical variables).",
"RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_pms_boss",
"str(current_catalogid) + \";\") class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Embedded APOGEE (optically invisible). Shorthand name:",
"& h<13 lead contact:<NAME> \"\"\" # peewee Model name ---> postgres table name",
"or saturated W3 and W2, with J-H>1.1. Some contaminants from scanning are filtered",
"faint limit for carton based on RP instead of H \"\"\" name =",
"b<-5 # Replace (b>-5 or l>180) and b<-5 as below based on the",
"self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set instrument = '\" + current_instrument",
"elif(current_rp < 15.075): current_instrument = 'BOSS' current_cadence = 'bright_4x1' elif(current_rp < 15.29): current_instrument",
"and (b>-5 or l>180) and b<-5 \"\"\" name = 'mwm_yso_nebula_apogee' category = 'science'",
"Gaia_DR2 # Gaia_DR2 is not a subset of 2MASS # # table catalogdb.mipsgal",
"TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5, MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24, MIPSGAL.hmag, Gaia_DR2.parallax, MIPSGAL.glon, MIPSGAL.glat)",
"below based on the text. # In words: # all the targets should",
"= 'mwm_yso' mapper = 'MWM' priority = 2700 # Above implementation has below",
"0.3, Gaia_DR2.bp_rp * 2.5 + 2.5 > Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 /",
"peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.95),",
"query class MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs - Disk BOSS (IR excess). Shorthand name: mwm_yso_disk_boss old",
"part before where() as S2 query. def build_query(self, version_id, query_region=None): query = (AllWise",
"SQL (optional): Implementation: h_m<13 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 \"\"\"",
"1), \"\"\" name = 'mwm_yso_cmz_apogee' category = 'science' instrument = 'APOGEE' cadence =",
"Cluster BOSS Catalog Shorthand name: mwm_yso_cluster_boss old class name: MWM_YSO_Cluster_Carton old shorthand name:",
"@Author: <NAME> (<EMAIL>) # @Date: 2020-06-10 # @Filename: mwm_yso.py # @License: BSD 3-clause",
"MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Nebula APOGEE(optically invisible, WISE saturated). Shorthand name: mwm_yso_nebula_apogee old class",
"/ Gaia_DR2.phot_rp_mean_flux_over_error > 0.02)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec,",
"log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments: Split from",
"cross-matching needed: the table has xmatch included Return columns: mipsgal id, 2mass id,",
"on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) #",
"pweewee model class corresponds to # table catalogdb.gaia_dr2_source. # # All values of",
"MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs - OB APOGEE Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_apogee old class",
"if l>180 (should have ~1.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed:",
"all the cartons # since catalogid is completely unique (even across different version_id)",
"Split from mwm_yso_ob to request BOSS observations, assigning cadence and faint limit for",
"14.76): current_instrument = 'BOSS' current_cadence = 'bright_3x1' elif(current_rp < 15.075): current_instrument = 'BOSS'",
"update sandbox.temp_mwm_yso_pms_boss \" + \" set instrument = '\" + current_instrument + \"'\"",
"mipsgal can be joined to twomass_psc via # mipsgal.twomass_name = TwoMassPSC.designation. # Then",
"(m.glon > 358 or m.glon < 2) and (m.glat > -1 and m.glat",
"# @Filename: mwm_yso.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import peewee from sdssdb.peewee.sdss5db.catalogdb import",
"w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 \"\"\" name = 'mwm_yso_disk_apogee' category = 'science' instrument",
"zone based on spitzer fluxes from mipsgal. brighter than H<13, have color 8.0-24>2.5,",
"sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11<",
"needed: Return columns: Gaia id, 2mass id, G, BP, RP, J, H, K,",
"MIPSGAL.hmag < 13, (MIPSGAL.mag_8_0 - MIPSGAL.mag_24) > 2.5, (Gaia_DR2.parallax < 0.2) | (Gaia_DR2.parallax",
"< 13, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag",
"& (AllWise.w4mpro >> None)) | ((AllWise.w3mpro >> None) & (AllWise.w4mpro >> None) &",
"Gaia (all MIPSGAL targets have a counterpart in 2MASS, and all 2MASS have",
"0.50, (AllWise.w2mpro - AllWise.w3mpro) > 1.00, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, (AllWise.w3mpro -",
"query class MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs - Variable BOSS (pre-main sequence optical variables). Shorthand name:",
"13, (MIPSGAL.mag_8_0 - MIPSGAL.mag_24) > 2.5, (Gaia_DR2.parallax < 0.2) | (Gaia_DR2.parallax >> None)))",
"not the same as # values of Gaia_DR2.parallax. # Hence, in the above",
"tip of the main sequence, brighter than rp<15.5, G<18 mag, closer than parallax>0.3,",
"2MASS is not a subset of Gaia_DR2 # Gaia_DR2 is not a subset",
"Shorthand name: mwm_yso_ob_boss old class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified Description",
"- AllWise.w4mpro) > (AllWise.w1mpro - AllWise.w2mpro) * 0.8 + 1.1)) if query_region: query",
".join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_g_mean_mag < 18.5,",
"main sequence, brighter than H<13, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and",
".join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, MIPSGAL.hmag < 13,",
"i in range(len(output)): current_catalogid = output[i][0] current_rp = output[i][1] if(current_rp < 14.76): current_instrument",
"\"twomass_name_fk\" FOREIGN KEY (twomass_name) # REFERENCES twomass_psc(designation) # # Due to below, we",
"RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_variable_boss",
"(l > 180)) # l, b in Gaia_DR2 are gallong and gallat in",
"< 1.6 * Gaia_DR2.bp_rp - 2.2, Gaia_DR2.parallax > 0.3)) if query_region: query =",
"+ str(current_catalogid) + \";\") class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Embedded APOGEE (optically invisible). Shorthand",
"\" update sandbox.temp_mwm_yso_variable_boss \" + \" set instrument = '\" + current_instrument +",
"SQL (optional): Implementation: h_m<13 and bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18 and",
"TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation))",
"twomass_psc via # mipsgal.twomass_name = TwoMassPSC.designation. # Then join via TIC and catalog_to_tic.",
"would be: select ct.catalogid from mipsgal m join twomass_psc t on twomass_name =",
"receive more than one): 'apogee_bright_3x1' Pseudo SQL (optional): Implementation: Hmag<13 and _8_0_-_24_>2.5 and",
"name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified Description of selection criteria: selection of",
"will receive more than one): 'apogee_bright_3x1' Pseudo SQL (optional): Implementation: Hmag<13 and _8_0_-_24_>2.5",
"catalog of vetted pre-main sequence stars Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed:",
"yso_clustering is a subset of gaia and # can be joined to gaia_dr2_source",
"to Catalog and CatalogToTIC_v8 # Catalog.catalogid == CatalogToTIC_v8.catalogid # We can remove the",
"TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag",
"str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \"",
"(optional): Implementation: phot_rp_mean_mag<15.5 and phot_g_mean_mag < 18.5 and h_m <13 and parallax >0.3",
"= 'mwm_yso_pms_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program =",
"or l>180) and b<-5 as below based on the text. # In words:",
"should not get here. current_instrument = None current_cadence = None raise TargetSelectionError('error in",
"zari18pms) & rp<15.5 lead contact:<NAME> \"\"\" # peewee Model name ---> postgres table",
"to # table catalogdb.gaia_dr2_source. # # All values of TIC_v8.plx (for non-null entries)",
"sandbox.temp_mwm_yso_disk_boss \" + \" set instrument = '\" + current_instrument + \"'\" \"",
"name = 'mwm_yso_variable_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program",
"W2-W3>0.5, W3-W4>1.5, closer than parallax>0.3, and brighter than H<13 (should have ~21.5K sources)",
"(g.parallax < 0.2 or g.parallax is null) and ct.version_id = 13 and ct.best",
"h_m<13 and bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2",
"version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # | is for peewee SQL",
"set in post_process() cadence = None # cadence is set in post_process() program",
"# | is for peewee SQL union query = query1 | query2 if",
"# join with Sagitta query1 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'),",
"table names: # https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Disk APOGEE (IR excess). Shorthand",
"\"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_cluster_boss ;\")",
"Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc ==",
"in all the cartons # since catalogid is completely unique (even across different",
"TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.phot_g_mean_mag > 18.5)",
"0.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.50, (AllWise.w2mpro - AllWise.w3mpro) > 1.00, (AllWise.w3mpro -",
"| boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: age<7.5",
"gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_pms_boss ;\") output = cursor.fetchall() for i in",
"null and j_m-h_m>1.1) and (b>-5 or l>180) and b<-5 \"\"\" name = 'mwm_yso_nebula_apogee'",
"/ Gaia_DR2.phot_bp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error",
"on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >>",
"sandbox.temp_mwm_yso_variable_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0] current_rp",
"and gallat in TIC_v8. # We are using the values from Gaia since",
"or l>180) and b<-5 # Replace (b>-5 or l>180) and b<-5 as below",
"and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\" name = 'mwm_yso_variable_apogee' category = 'science' instrument = 'APOGEE' cadence",
"13, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag -",
"plane if l>180 # Hence: # ((b>-5) and (b<5)) or ((b<-5) and (l",
"= 'mwm_yso_cluster_boss' category = 'science' instrument = None # instrument is set in",
"< 13, (Gaia_DR2.phot_g_mean_mag > 18.5) | (Gaia_DR2.phot_g_mean_mag >> None), (AllWise.j_m_2mass - AllWise.h_m_2mass) >",
"< -5) & (Gaia_DR2.l > 180)) | ((Gaia_DR2.b >> None) & (Gaia_DR2.l >>",
"the plane+ # few sources that can be # located further south of",
"can be joined to twomass_psc via # mipsgal.twomass_name = TwoMassPSC.designation. # Then join",
"import BaseCarton from target_selection.exceptions import TargetSelectionError # See catalog.py for the name of",
"2700 def build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key,",
"are using the values from Gaia since # TIC propagates the coordinates back",
"update sandbox.temp_mwm_yso_cluster_boss \" + \" set cadence = '\" + current_cadence + \"'\"",
"w3mpro-w4mpro>1.5 and parallax>0.3 Comments: Split from mwm_yso_s1 to request BOSS observations, same color",
"Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, Gaia_DR2.phot_g_mean_mag < 18.5,",
"name = 'mwm_yso_nebula_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program",
"a subset of 2MASS # mipsgal can be joined to twomass_psc via #",
"\" set cadence = '\" + current_cadence + \"'\" \" where catalogid =",
"version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag,",
"+ \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" +",
"source catalogs needed: 2mass, gaia Additional cross-matching needed: Return columns: Gaia id, 2mass",
"on the position on the sky: Removed below condition. l is glon (galactic",
"None) & ((AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.1)), ((Gaia_DR2.b > -5) & (Gaia_DR2.b <",
"== TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True,",
"will receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and (phot_g_mean_mag>18.5 or",
"id, 2mass id, allwise id, G, BP, RP, J, H, K, W1, W2,",
".switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, MIPSGAL.hmag <",
"than H<13, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should have",
"parallax>0.3 Comments: Split from mwm_yso_ob to request BOSS observations, assigning cadence and faint",
"the cartons # since catalogid is completely unique (even across different version_id) #",
"completely unique (even across different version_id) # so the join with Catalog doesn't",
"Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id ==",
"needed: Note: Using the Gaia xmatch somehow misses half the sources. Selection was",
"though no single target will receive more than one): boss_bright_3x1 if RP<14.76 |",
"with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,",
"| boss_bright_6x1 if RP<15.5 Implementation: (in sagitta | in zari18pms) & rp<15.5 lead",
"< H < 13) Implementation: (in sagitta | in zari18pms) & h<13 lead",
"'mwm_yso_embedded_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso'",
"MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, APOGEE Shorthand name: mwm_yso_pms_apogee Comments: New Simplified",
"than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should have ~8.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function",
"parallax>0.3 \"\"\" name = 'mwm_yso_disk_apogee' category = 'science' instrument = 'APOGEE' cadence =",
"is glon (galactic longitude) b is glat (galactic latitude) All four statements below",
"fluxes from mipsgal. brighter than H<13, have color 8.0-24>2.5, and have parallax<0.2 or",
"(peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs)",
"program = 'mwm_yso' mapper = 'MWM' priority = 2700 # mipsgal is a",
"Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_boss old class name: MWM_YSO_OB_Carton old shorthand name:",
"Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc ==",
"CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1),",
"self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set cadence = '\" + current_cadence",
"in which M_x is the absolute mag (should have ~52.7K sources) Wiki page:",
"range(len(output)): current_catalogid = output[i][0] current_rp = output[i][1] if(current_rp < 14.76): current_instrument = 'BOSS'",
"< 15.5)) # join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'),",
"gallong and gallat in TIC_v8. # We are using the values from Gaia",
"| boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5",
"Implementation: h_m<13 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 \"\"\" name =",
"and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 \"\"\" name = 'mwm_yso_disk_apogee' category = 'science'",
"will receive more than one): cadence options for these targets: boss_bright_3x1 if RP<14.76",
"parallax cadence options for these targets (list all options, even though no single",
"from scanning are filtered on the plane of the sky: all the targets",
"Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id",
"= 'mwm_yso_variable_boss' category = 'science' instrument = None # instrument is set in",
"(query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs",
"\" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs -",
"MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Cluster APOGEE Catalog Shorthand name: mwm_yso_cluster_apogee old class name: MWM_YSO_Cluster_Carton",
"Pseudo SQL (optional): Implementation: phot_g_mean_mag < 18.5 and h_m <13 and parallax >0.3",
"# https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Disk APOGEE (IR excess). Shorthand name: mwm_yso_disk_apogee",
"name: MWM_YSO_CMZ_Carton old shorthand name: mwm_yso_cmz Simplified Description of selection criteria: selection of",
"- AllWise.w4mpro) > 1.50, (AllWise.w3mpro - AllWise.w4mpro) > (AllWise.w1mpro - AllWise.w2mpro) * 0.8",
"all 2MASS have an entry in TIC, but not all the TIC entries",
"W3-W4>1.5, closer than parallax>0.3, and brighter than H<13 (should have ~21.5K sources) Wiki",
"YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j, YSO_Clustering.h, YSO_Clustering.k, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2,",
".switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag <",
"= tic.id where m.hmag < 13 and (m.mag_8_0 - m.mag_24) > 2.5 and",
"and j_m-h_m>1 and h_m-ks_m>0.5 and w1mpro-w2mpro>0.5 and w2mpro-w3mpro>1 and w3mpro-w4mpro>1.5 and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\"",
"null) and ct.version_id = 13 and ct.best is true; Note you only need",
"True, MIPSGAL.hmag < 13, (MIPSGAL.mag_8_0 - MIPSGAL.mag_24) > 2.5, (Gaia_DR2.parallax < 0.2) |",
"on spitzer fluxes from mipsgal. brighter than H<13, have color 8.0-24>2.5, and have",
"be # located further south of the plane if l>180 # Hence: #",
"\" + str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \" update",
"(b<5)) or ((b<-5) and (l > 180)) # l, b in Gaia_DR2 are",
"Gaia xmatch somehow misses half the sources. Selection was done on the allwise",
"we cannot use TIC_v8.plx instead # of Gaia_DR2.parallax. if query_region: query = (query",
"output[i][0] current_rp = output[i][1] if(current_rp < 14.76): current_instrument = 'BOSS' current_cadence = 'bright_3x1'",
"phot_rp_mean_mag<15.5 and phot_g_mean_mag < 18.5 and h_m <13 and parallax >0.3 and bp_rp*2.5+2.5",
"name: mwm_yso_cluster_boss old class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified Description of",
"if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set",
"MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs - OB BOSS Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_boss old class",
"True, TwoMassPSC.h_m < 13, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag <",
"name: mwm_yso_s2 Simplified Description of selection criteria: selection of YSOs, brighter than H<13,",
"5 + 11 < Gaia_DR2.phot_bp_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1),",
"HR diagram to select cool pre-main sequence stars, with BP-RP>13, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G, requiring",
"~8.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, gaia Additional cross-matching",
"(IR excess). Shorthand name: mwm_yso_disk_apogee old class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1",
"\" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs -",
"Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int ==",
"plane+ few sources that can be located further south of the plane if",
"of selection criteria: selection of YSOs brighter than H<13, closer than parallax>0.3. Filter",
"so we should not get here. current_instrument = None current_cadence = None raise",
"condition. l is glon (galactic longitude) b is glat (galactic latitude) All four",
"and then the resulting selection was crossmatched against against Gaia with 1\" search",
"15.075): current_instrument = 'BOSS' current_cadence = 'bright_4x1' elif(current_rp < 15.29): current_instrument = 'BOSS'",
"== Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id",
"selection criteria: selection of YSOs brighter than H<13, closer than parallax>0.3. Filter on",
".where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Cluster APOGEE",
"sandbox.temp_mwm_yso_pms_boss \" + \" set cadence = '\" + current_cadence + \"'\" \"",
"in 2MASS, and all 2MASS have an entry in TIC, but not all",
".switch(TIC_v8) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id ==",
"\"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_variable_boss ;\")",
"# Then join via TIC and catalog_to_tic. # # mipsgal is a subset",
"https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Gaia, 2mass, allwise Additional cross-matching needed: Note: Using",
"(b>-5 or l>180) and b<-5 # Replace (b>-5 or l>180) and b<-5 as",
"cross-matching needed: Note: Using the Gaia xmatch somehow misses half the sources. Selection",
"peewee from sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL, AllWise, Catalog, CatalogToTIC_v8, Gaia_DR2, Sagitta, TIC_v8, TwoMassPSC, YSO_Clustering,",
".switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m <",
"python # -*- coding: utf-8 -*- # # @Author: <NAME> (<EMAIL>) # @Date:",
"tip of the main sequence, brighter than H<13, G<18 mag, closer than parallax>0.3,",
"BOSS (IR excess). Shorthand name: mwm_yso_disk_boss old class name: MWM_YSO_S1_Carton old shorthand name:",
"CatalogToTIC_v8.best >> True, YSO_Clustering.h < 13, YSO_Clustering.age < 7.5)) if query_region: query =",
"build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag,",
"version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro",
"saturated (blank) W4 with W2-W3>4, or saturated W3 and W2, with J-H>1.1. Some",
"raise TargetSelectionError('error in mwm_yso_variable_boss ' + 'post_process(): ' + 'instrument = None, cadence=",
"sources that lack gaia xmatch) Additional cross-matching needed: Note: Using the Gaia xmatch",
"- MIPSGAL.mag_24) > 2.5, (Gaia_DR2.parallax < 0.2) | (Gaia_DR2.parallax >> None))) if query_region:",
"Gaia_DR2 are gallong and gallat in TIC_v8. # We are using the values",
"RP, J, H, K, W1, W2, W3, W4,parallax cadence options for these targets",
".join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs -",
"on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.phot_g_mean_mag",
"\"\"\" name = 'mwm_yso_cluster_boss' category = 'science' instrument = None # instrument is",
"= 'BOSS' current_cadence = 'bright_6x1' else: # All cases should be covered above",
"have ~3.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: mipsgal Additional cross-matching",
"True, TwoMassPSC.h_m < 13, (((AllWise.w2mpro - AllWise.w3mpro) > 4) & (AllWise.w4mpro >> None))",
"Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j, YSO_Clustering.h, YSO_Clustering.k, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))",
"TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m",
"\"\"\"YSOs - OB APOGEE Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_apogee old class name:",
"color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should have ~8.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source",
">> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag",
"sql query would be: select ct.catalogid from mipsgal m join twomass_psc t on",
"S2 query. def build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'),",
"name = 'mwm_yso_cmz_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program",
"Selecting the clustered sources from the catalog of clustered structures, with age<7.5 dex",
"post_process() program = 'mwm_yso' mapper = 'MWM' priority = 2700 def build_query(self, version_id,",
"Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))",
"against against Gaia with 1\" search radius. Return columns: Gaia id, 2mass id,",
"these targets (list all options, even though no single target will receive more",
"values of Gaia_DR2.parallax. # Hence, in the above query, we cannot use TIC_v8.plx",
"query class MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Variable APOGEE (pre-main sequence optical variables). Shorthand name:",
"of the main sequence, brighter than rp<15.5, G<18 mag, closer than parallax>0.3, color",
"(AllWise.w1mpro - AllWise.w2mpro) > 0.50, (AllWise.w2mpro - AllWise.w3mpro) > 1.00, (AllWise.w3mpro - AllWise.w4mpro)",
"Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_apogee' category = 'science'",
"allwise Additional cross-matching needed: Note: Using the Gaia xmatch somehow misses half the",
"mwm_yso_disk_apogee old class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified Description of selection",
"(phot_g_mean_mag>18.5 or phot_g_mean_mag is null) and j_m-h_m>1 and h_m-ks_m>0.5 and w1mpro-w2mpro>0.5 and w2mpro-w3mpro>1",
"Gaia_DR2.parallax, MIPSGAL.glon, MIPSGAL.glat) .join(TwoMassPSC, on=(MIPSGAL.twomass_name == TwoMassPSC.designation)) .join(TIC_v8, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER,",
"name: mwm_yso_s3 Simplified Description of selection criteria: selection of YSOs brighter than H<13,",
"2700 # Above implementation has below clause # and (b>-5 or l>180) and",
"'\" + current_instrument + \"'\" \" where catalogid = \" + str(current_catalogid) +",
"= 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso' mapper =",
"version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # join with Zari18pms query2 =",
"to postgres table names: # https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Disk APOGEE (IR",
"instrument = '\" + current_instrument + \"'\" \" where catalogid = \" +",
"boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and",
"targets (list all options, even though no single target will receive more than",
"All four statements below are equivalent. (l> 358 or l< 2) and b",
"7 < H < 13) Implementation: (in sagitta | in zari18pms) & h<13",
"TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id",
"' + 'post_process(): ' + 'instrument = None, cadence= None') if current_instrument is",
"which M_x is the absolute mag (should have ~52.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function",
"are equivalent. (l> 358 or l< 2) and b between -1 and 1",
"2) and b between -1 and 1 (m.glon > 358 or m.glon <",
"no single target will receive more than one): 'apogee_bright_3x1' Pseudo SQL (optional): Implementation:",
"peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error * 0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error <",
"APOGEE (optically invisible). Shorthand name: mwm_yso_embedded_apogee old class name: MWM_YSO_S2_Carton old shorthand name:",
"MIPSGAL.hmag, Gaia_DR2.parallax, MIPSGAL.glon, MIPSGAL.glat) .join(TwoMassPSC, on=(MIPSGAL.twomass_name == TwoMassPSC.designation)) .join(TIC_v8, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .join(Gaia_DR2,",
"== YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, YSO_Clustering.age <",
"Simplified Description of selection criteria: selection of YSOs, brighter than H<15, saturated (blank)",
"query_region[2]))) return query def post_process(self, model): \"\"\" cadence options for these targets: boss_bright_3x1",
"/ Gaia_DR2.phot_g_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error >",
"= (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton):",
"costly join. def build_query(self, version_id, query_region=None): query = (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key,",
"and _8_0_-_24_>2.5 and (parallax<0.2 or parallax is null) For CMZ, the raw sql",
"selection of sources in the central molecular zone based on spitzer fluxes from",
"11 < Gaia_DR2.phot_bp_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp >",
"+ \" sandbox.temp_mwm_yso_cluster_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid =",
"page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, allwise Additional cross-matching needed: Return columns:",
"# TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_boss' category = 'science' instrument = None # instrument",
"b<-5 \"\"\" name = 'mwm_yso_nebula_apogee' category = 'science' instrument = 'APOGEE' cadence =",
"(for non-null entries) are not the same as # values of Gaia_DR2.parallax. #",
"colors J-H>0,5, W1-W2>0.5, W2-W3>1, W3-W4>1.5, and relates (W3-W4)>(W1-W2)*0.5+1.1 (should have ~11.6K sources) Wiki",
"utf-8 -*- # # @Author: <NAME> (<EMAIL>) # @Date: 2020-06-10 # @Filename: mwm_yso.py",
"= (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_APOGEE_Carton(BaseCarton):",
"< 18, Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1) < 1.6",
"'mwm_yso' mapper = 'MWM' priority = 2700 def build_query(self, version_id, query_region=None): query =",
"'BOSS' current_cadence = 'bright_6x1' else: # All cases should be covered above so",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.phot_g_mean_mag > 18.5) |",
"of H \"\"\" name = 'mwm_yso_ob_boss' category = 'science' instrument = None #",
"with 1\" search radius. Return columns: Gaia id, 2mass id, allwise id, G,",
"with age<7.5 dex and brighter than rp<15.5 mag. Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source",
"catalogdb.zari18pms Return columns: Gaia id, 2mass id, G, BP, RP, J, H, K,",
".where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\" YSOs - Pre-main",
"'MWM' priority = 2700 # mipsgal is a subset of 2MASS # mipsgal",
"\";\") class MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, APOGEE Shorthand name: mwm_yso_pms_apogee Comments:",
"query_region[2]))) return query class MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, BOSS Shorthand name:",
"1.1), Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1)",
"\" + str(current_catalogid) + \";\") class MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, APOGEE",
"closer than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should have ~8.7K sources) Wiki page:",
"13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp * 2.5 + 2.5 > Gaia_DR2.phot_g_mean_mag - 5",
"/ Gaia_DR2.phot_g_mean_flux_over_error, 0.95), peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error) * 5 + 11 < Gaia_DR2.phot_bp_mean_mag",
"* Gaia_DR2.bp_rp - 2.2, Gaia_DR2.parallax > 0.3)) if query_region: query = (query .join_from(CatalogToTIC_v8,",
"g.parallax is null) and ct.version_id = 13 and ct.best is true; Note you",
".join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5,",
"str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \"",
"all options, even though no single target will receive more than one): cadence",
"will receive more than one): apogee_bright_3x1 (for 7 < H < 13) Implementation:",
"<13 and parallax >0.3 and bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and",
"TwoMassPSC.h_m < 13)) # | is for peewee SQL union query = query1",
"M_x is the absolute mag (should have ~52.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional",
"query_region[1], query_region[2]))) return query class MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs - Variable BOSS (pre-main sequence optical",
"= 2700 # yso_clustering is a subset of gaia and # can be",
"== YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, YSO_Clustering.h < 13, YSO_Clustering.age <",
"name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified Description of selection criteria: Selecting the",
"(Gaia_DR2.b < 5)) | ((Gaia_DR2.b < -5) & (Gaia_DR2.l > 180)) | ((Gaia_DR2.b",
"the sky: Removed below condition. l is glon (galactic longitude) b is glat",
"15.5)) # | is for peewee SQL union query = query1 | query2",
"== Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # join",
"< 13, (MIPSGAL.mag_8_0 - MIPSGAL.mag_24) > 2.5, (Gaia_DR2.parallax < 0.2) | (Gaia_DR2.parallax >>",
"cadence = None # cadence is set in post_process() program = 'mwm_yso' mapper",
"# # mipsgal is a subset of 2MASS # 2MASS is a subset",
"\"\"\"YSOs - Disk APOGEE (IR excess). Shorthand name: mwm_yso_disk_apogee old class name: MWM_YSO_S1_Carton",
"mwm_yso_ob to request BOSS observations, assigning cadence and faint limit for carton based",
"None, cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \"",
"W3, W4 cadence options for these targets (list all options, even though no",
"Pseudo SQL (optional): Implementation: age<7.5 and h<13 \"\"\" name = 'mwm_yso_cluster_apogee' category =",
"(query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query def post_process(self, model):",
"gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_variable_boss ;\") output = cursor.fetchall() for i in",
"> 0.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.50, (AllWise.w2mpro - AllWise.w3mpro) > 1.00, (AllWise.w3mpro",
"have ~1.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, allwise Additional",
"request BOSS observations, same color selection but assigning cadence and faint limit for",
"xmatch somehow misses half the sources. Selection was done on the allwise catalog",
"relations in variability of var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95, and log10(var_bp)*5+11<M_BP, in which M_x is the",
"TIC_v8, TwoMassPSC, YSO_Clustering, Zari18pms) from target_selection.cartons import BaseCarton from target_selection.exceptions import TargetSelectionError #",
"self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_disk_boss ;\") output = cursor.fetchall()",
"return query def post_process(self, model): \"\"\" cadence options for these targets: boss_bright_3x1 if",
"columns: Gaia id, 2mass id, G, BP, RP, J, H, K, parallax cadence",
"mwm_yso_ob_boss old class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified Description of selection",
"~1.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, allwise Additional cross-matching",
"Model name ---> postgres table name # Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' #",
"* (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1) < 1.6 * Gaia_DR2.bp_rp - 2.2, Gaia_DR2.parallax",
"lead contact:<NAME> \"\"\" # peewee Model name ---> postgres table name # Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source'",
"# # All values of TIC_v8.plx (for non-null entries) are not the same",
"post_process(self, model): \"\"\" cadence options for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1",
"# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import peewee from sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL, AllWise, Catalog,",
"| boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5",
"current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set instrument",
"the allwise catalog that had 2mass photometry, and then the resulting selection was",
"version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp <",
"sandbox.temp_mwm_yso_pms_boss \" + \" set instrument = '\" + current_instrument + \"'\" \"",
"# Due to below, we do not need a between to Catalog and",
"< 15.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro) > 0.50, (AllWise.w3mpro",
"> -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag - 5 *",
"\";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \"",
"peewee model names corresponding # to postgres table names: # https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class MWM_YSO_Disk_APOGEE_Carton(BaseCarton):",
"self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_cluster_boss ;\") output = cursor.fetchall()",
"| query2 if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1],",
"observations, assigning cadence and faint limit for carton based on RP instead of",
"'mwm_yso' mapper = 'MWM' priority = 2700 # Above implementation has below clause",
"and relates (W3-W4)>(W1-W2)*0.5+1.1 (should have ~11.6K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs",
"l>180) and b<-5 # S2_5 query below has the same part before where()",
"instead of H \"\"\" name = 'mwm_yso_disk_boss' category = 'science' instrument = None",
"(BP-RP)*2.5-1<M_G, requiring variability in g,bp,rp>0.02 (with var_x defined as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have relations in",
"| boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: rp<15.5 and bp_rp between -0.2",
"for the name of peewee model names corresponding # to postgres table names:",
"query class MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs - Cluster BOSS Catalog Shorthand name: mwm_yso_cluster_boss old class",
"between -0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 \"\"\"",
"mwm_yso_disk_boss old class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified Description of selection",
"Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id))",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, YSO_Clustering.age < 7.5)) if",
"(should have ~11.6K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass+allwise, gaia",
"'mwm_yso_ob_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso'",
"sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, allwise Additional cross-matching needed:",
"J-H>0,5, W1-W2>0.5, W2-W3>1, W3-W4>1.5, and relates (W3-W4)>(W1-W2)*0.5+1.1 (should have ~11.6K sources) Wiki page:",
"class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Nebula APOGEE(optically invisible, WISE saturated). Shorthand name: mwm_yso_nebula_apogee old",
"'mwm_yso_ob_boss' category = 'science' instrument = None # instrument is set in post_process()",
"instrument = None # instrument is set in post_process() cadence = None #",
"the plane if l>180 (should have ~1.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source",
"= None, cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss",
"TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2,",
"catalog Additional cross-matching needed: Return columns: Gaia id, 2mass id, G, BP, RP,",
"= self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_pms_boss ;\") output =",
"name: mwm_yso_ob_apogee old class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified Description of",
"APOGEE Catalog Shorthand name: mwm_yso_cluster_apogee old class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster",
"> 0.50, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, Gaia_DR2.parallax > 0.3)) # Gaia_DR2 pweewee",
"cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" +",
"Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, YSO_Clustering.h <",
"on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id))",
"== TwoMassPSC.designation)) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id == TIC_v8.gaia_int)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id",
"the table has xmatch included Return columns: mipsgal id, 2mass id, j, h,",
".join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id",
"self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_ob_boss ;\") output = cursor.fetchall()",
"name: MWM_YSO_S2_5_Carton old shorthand name: mwm_yso_s2_5 Simplified Description of selection criteria: selection of",
"and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and",
"color 8.0-24>2.5, and have parallax<0.2 or lack a Gaia xmatch. (should have ~3.2K",
"H<13, fainter than G>15 or without gaia detection, colors J-H>0,5, W1-W2>0.5, W2-W3>1, W3-W4>1.5,",
"Additional source catalogs needed: Kounkel+20 clustered catalog Additional cross-matching needed: Return columns: Gaia",
"and b<-5 as below based on the text. # In words: # all",
"'BOSS' current_cadence = 'bright_3x1' elif(current_rp < 15.075): current_instrument = 'BOSS' current_cadence = 'bright_4x1'",
"sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass+allwise, gaia (allow sources that",
"2 degrees in l and 1 degree in b from the galactic center,",
"or ((b<-5) and (l > 180)) # l, b in Gaia_DR2 are gallong",
"== version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # | is for peewee",
"+ 'instrument = None, cadence= None') if current_instrument is not None: self.database.execute_sql( \"",
"15.5): current_instrument = 'BOSS' current_cadence = 'bright_6x1' else: # All cases should be",
"Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_apogee old class name: MWM_YSO_OB_Carton old shorthand name:",
"and Gaia (all MIPSGAL targets have a counterpart in 2MASS, and all 2MASS",
"current_rp = output[i][1] if(current_rp < 14.76): current_instrument = 'BOSS' current_cadence = 'bright_3x1' elif(current_rp",
"query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs - Disk BOSS (IR excess).",
"even though no single target will receive more than one): 'apogee_bright_3x1' Pseudo SQL",
"gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_cluster_boss ;\") output = cursor.fetchall() for i in",
"= 2700 # mipsgal is a subset of 2MASS # mipsgal can be",
"AllWise.w4mpro) > 1.50, Gaia_DR2.parallax > 0.3)) # Gaia_DR2 pweewee model class corresponds to",
"G, BP, RP, J, H, K, W1, W2, W3, W4 cadence options for",
"l>180 (should have ~1.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass,",
"phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 \"\"\" name = 'mwm_yso_ob_apogee' category = 'science' instrument",
"# -*- coding: utf-8 -*- # # @Author: <NAME> (<EMAIL>) # @Date: 2020-06-10",
"# located further south of the plane if l>180 # Hence: # ((b>-5)",
"selection criteria: selection of YSOs, brighter than H<13, fainter than G>15 or without",
"boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Implementation: (in sagitta | in zari18pms)",
".join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best",
"/ Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error * 0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /",
"is a subset of 2MASS # 2MASS is a subset of TIC_v8 #",
"variables). Shorthand name: mwm_yso_variable_boss old class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified",
"/ Gaia_DR2.phot_rp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.95), peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error) *",
"target will receive more than one): cadence options for these targets: boss_bright_3x1 if",
"h_m <13 and parallax >0.3 and bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1)",
"of the HR diagram to select cool pre-main sequence stars, with BP-RP>13, (BP-RP)*2.5+2.5>M_G,",
"\"\"\" name = 'mwm_yso_variable_boss' category = 'science' instrument = None # instrument is",
"Gaia_DR2 is not a subset of 2MASS # # table catalogdb.mipsgal # Foreign-key",
"between -1 and 1 (m.glon > 358 or m.glon < 2) and (m.glat",
"Simplified Description of selection criteria: selection of sources in the central molecular zone",
"WISE saturated). Shorthand name: mwm_yso_nebula_apogee old class name: MWM_YSO_S2_5_Carton old shorthand name: mwm_yso_s2_5",
"on tic.twomass_psc = t.designation left outer join gaia_dr2_source g on g.source_id = tic.gaia_int",
"one): Pseudo SQL (optional): Implementation: h_m<13 and bp_rp between -0.2 and 1.1 and",
"# Foreign-key constraints: # \"yso_clustering_source_id_fkey\" FOREIGN KEY (source_id) # REFERENCES gaia_dr2_source(source_id) def build_query(self,",
"on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (AllWise.w1mpro",
"((AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.1)), ((Gaia_DR2.b > -5) & (Gaia_DR2.b < 5)) |",
"None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set cadence = '\" +",
"peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id == TIC_v8.gaia_int)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best",
"are within 2 degrees in l and 1 degree in b from the",
"join gaia_dr2_source g on g.source_id = tic.gaia_int join catalog_to_tic_v8 ct on ct.target_id =",
"(b>-5 or l>180) and b<-5 as below based on the text. # In",
"catalogs needed: catalogdb.sagitta, catalogdb.zari18pms Return columns: Gaia id, 2mass id, G, BP, RP,",
"MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs - Disk BOSS (IR excess). Shorthand name: mwm_yso_disk_boss old class name:",
"sandbox.temp_mwm_yso_ob_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0] current_rp",
"via source_id. # # table catalogdb.yso_clustering # Foreign-key constraints: # \"yso_clustering_source_id_fkey\" FOREIGN KEY",
"position of the HR diagram to select cool pre-main sequence stars, with BP-RP>13,",
"not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set cadence = '\"",
"will receive more than one): boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075 |",
"= designation join tic_v8 tic on tic.twomass_psc = t.designation left outer join gaia_dr2_source",
"in post_process() cadence = None # cadence is set in post_process() program =",
"query class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Nebula APOGEE(optically invisible, WISE saturated). Shorthand name: mwm_yso_nebula_apogee",
"((Gaia_DR2.b >> None) & (Gaia_DR2.l >> None)))) if query_region: query = (query .join_from(CatalogToTIC_v8,",
"(w3mpro is null and w4mpro is null and j_m-h_m>1.1) and (b>-5 or l>180)",
"(IR excess). Shorthand name: mwm_yso_disk_boss old class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1",
"BOSS Shorthand name: mwm_yso_pms_boss Comments: New, Split from PMS Simplified Description of selection",
"post_process() cadence = None # cadence is set in post_process() program = 'mwm_yso'",
"south of the plane if l>180 (should have ~1.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function",
"single target will receive more than one): Pseudo SQL (optional): Implementation: phot_g_mean_mag <",
"< 1.6*bp_rp-2.2 and parallax>0.3 \"\"\" name = 'mwm_yso_ob_apogee' category = 'science' instrument =",
"priority = 2700 # mipsgal is a subset of 2MASS # mipsgal can",
"boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1",
"update sandbox.temp_mwm_yso_disk_boss \" + \" set instrument = '\" + current_instrument + \"'\"",
"RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Implementation: (in sagitta |",
"mwm_yso_pms_boss ' + 'post_process(): ' + 'instrument = None, cadence= None') if current_instrument",
"criteria: selection of YSOs, brighter than H<15, saturated (blank) W4 with W2-W3>4, or",
"words: # all the targets should be within 5 deg of the plane+",
"query_region[1], query_region[2]))) return query class MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs - Disk BOSS (IR excess). Shorthand",
"brighter than H<13 (should have ~21.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs",
"subset of 2MASS # mipsgal can be joined to twomass_psc via # mipsgal.twomass_name",
"# join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'),",
"and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\" name = 'mwm_yso_embedded_apogee' category = 'science' instrument = 'APOGEE' cadence",
"Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5, MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24,",
"current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set instrument",
"None, cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \"",
"TwoMassPSC.h_m < 13, (((AllWise.w2mpro - AllWise.w3mpro) > 4) & (AllWise.w4mpro >> None)) |",
"if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and phot_g_mean_mag < 18.5 and h_m",
"peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.95), peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error) * 5 + 11",
"return query class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Nebula APOGEE(optically invisible, WISE saturated). Shorthand name:",
"& (Gaia_DR2.l >> None)))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec,",
"mwm_yso_variable_boss ' + 'post_process(): ' + 'instrument = None, cadence= None') if current_instrument",
"Formerly mwm_yso_cmz, removed check on the position on the sky: Removed below condition.",
".where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs - OB BOSS",
"sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, gaia Additional cross-matching needed:",
"< 0.2) | (Gaia_DR2.parallax >> None))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog)",
"and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments: Split",
"we do not need a between to Catalog and CatalogToTIC_v8 # Catalog.catalogid ==",
"in g,bp,rp>0.02 (with var_x defined as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have relations in variability of var_g<var_bp<var_g^0.75,",
"criteria: selection of YSOs brighter than H<13, closer than parallax>0.3. Filter on the",
"RP instead of H \"\"\" name = 'mwm_yso_ob_boss' category = 'science' instrument =",
"target will receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and bp_rp",
"\"\"\"YSOs - OB BOSS Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_boss old class name:",
"0.8 + 1.1)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0],",
">> None)))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1],",
"if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set",
"'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority = 2700 # Above implementation",
"sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: mipsgal Additional cross-matching needed: the",
"further south of the plane if l>180 # Hence: # ((b>-5) and (b<5))",
"single target will receive more than one): Pseudo SQL (optional): Implementation: age<7.5 and",
"> 180)) | ((Gaia_DR2.b >> None) & (Gaia_DR2.l >> None)))) if query_region: query",
"TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))",
"mapper = 'MWM' priority = 2700 # Above implementation has below clause #",
"= query1 | query2 if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec,",
"further south of the plane if l>180 (should have ~1.2K sources) Wiki page:",
"YSO_Clustering.h, YSO_Clustering.k, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id",
"'BOSS' current_cadence = 'bright_4x1' elif(current_rp < 15.29): current_instrument = 'BOSS' current_cadence = 'bright_5x1'",
"+ 1.1)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1],",
"criteria: selection of YSOs, brighter than H<13, fainter than G>15 or without gaia",
"the plane+ few sources that can be located further south of the plane",
"mwm_yso_ob Simplified Description of selection criteria: Selecting the OB stars at the tip",
"pre-main sequence stars Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: catalogdb.sagitta, catalogdb.zari18pms Return",
"G, BP, RP, J, H, K, parallax cadence options for these targets (list",
"= output[i][1] if(current_rp < 14.76): current_instrument = 'BOSS' current_cadence = 'bright_3x1' elif(current_rp <",
"\" + \" set instrument = '\" + current_instrument + \"'\" \" where",
"rp<15.5 and bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2",
"subset of 2MASS # 2MASS is a subset of TIC_v8 # Gaia_DR2 is",
"receive more than one): Pseudo SQL (optional): Implementation: phot_g_mean_mag < 18.5 and h_m",
"None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \"",
"assigning cadence and faint limit for carton based on RP instead of H",
"Gaia_DR2.source_id)) .join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag <",
".join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id ==",
"# cadence is set in post_process() program = 'mwm_yso' mapper = 'MWM' priority",
"Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (Gaia_DR2.bp_rp > -0.2)",
"more than one): 'apogee_bright_3x1' Pseudo SQL (optional): Implementation: Hmag<13 and _8_0_-_24_>2.5 and (parallax<0.2",
"cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" +",
"RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation:",
"do not need a between to Catalog and CatalogToTIC_v8 # Catalog.catalogid == CatalogToTIC_v8.catalogid",
"+ str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss",
"def build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'),",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # | is for",
"m join twomass_psc t on twomass_name = designation join tic_v8 tic on tic.twomass_psc",
"though no single target will receive more than one): cadence options for these",
"+ \";\") class MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, APOGEE Shorthand name: mwm_yso_pms_apogee",
"'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority = 2700",
"W2, with J-H>1.1. Some contaminants from scanning are filtered on the plane of",
"= (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_PMS_BOSS_Carton(BaseCarton):",
"receive more than one): Pseudo SQL (optional): Implementation: age<7.5 and h<13 \"\"\" name",
"+ \";\") class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Central Molecular Zone APOGEE. Shorthand name: mwm_yso_cmz_apogee",
"current_cadence = None raise TargetSelectionError('error in mwm_yso_variable_boss ' + 'post_process(): ' + 'instrument",
"FOREIGN KEY (twomass_name) # REFERENCES twomass_psc(designation) # # Due to below, we do",
"* 5 + 11 < Gaia_DR2.phot_bp_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) -",
"CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax",
"= None current_cadence = None raise TargetSelectionError('error in mwm_yso_variable_boss ' + 'post_process(): '",
"= 'mwm_yso_pms_boss' category = 'science' instrument = None # instrument is set in",
"We can remove the join with Catalog in all the cartons # since",
"query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j,",
"sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\" name = 'mwm_yso_variable_apogee' category = 'science' instrument = 'APOGEE' cadence =",
"options, even though no single target will receive more than one): 'apogee_bright_3x1' Pseudo",
"Foreign-key constraints: # \"yso_clustering_source_id_fkey\" FOREIGN KEY (source_id) # REFERENCES gaia_dr2_source(source_id) def build_query(self, version_id,",
"is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set cadence =",
"saturated W3 and W2, with J-H>1.1. Some contaminants from scanning are filtered on",
"and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\" name = 'mwm_yso_variable_apogee' category = 'science'",
"= 'BOSS' current_cadence = 'bright_5x1' elif(current_rp < 15.5): current_instrument = 'BOSS' current_cadence =",
"Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j, YSO_Clustering.h, YSO_Clustering.k, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int",
"sandbox.temp_mwm_yso_pms_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0] current_rp",
"in mwm_yso_disk_boss ' + 'post_process(): ' + 'instrument = None, cadence= None') if",
"https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: catalogdb.sagitta, catalogdb.zari18pms Return columns: Gaia id, 2mass id,",
"< 13) Implementation: (in sagitta | in zari18pms) & h<13 lead contact:<NAME> \"\"\"",
"select cool pre-main sequence stars, with BP-RP>13, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G, requiring variability in g,bp,rp>0.02",
".join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\" YSOs",
"TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_g_mean_mag",
"H \"\"\" name = 'mwm_yso_ob_boss' category = 'science' instrument = None # instrument",
"if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: rp<15.5 and bp_rp",
"of selection criteria: selection of sources in the central molecular zone based on",
"Additional source catalogs needed: 2mass+allwise, gaia (allow sources that lack gaia xmatch) Additional",
"query2 if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2])))",
"the name of peewee model names corresponding # to postgres table names: #",
"RP, J, H, K, parallax cadence options for these targets: boss_bright_3x1 if RP<14.76",
"dex and brighter than rp<15.5 mag. Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed:",
"Description of selection criteria: Selecting the clustered sources from the catalog of vetted",
"H < 13) Implementation: (in sagitta | in zari18pms) & h<13 lead contact:<NAME>",
"since # TIC propagates the coordinates back to epoch 2000.0 # (b>-5 or",
"Description of selection criteria: Selecting the OB stars at the tip of the",
"Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp * 2.5 +",
"and (b>-5 or l>180) and b<-5 # Replace (b>-5 or l>180) and b<-5",
"W3 and W2, with J-H>1.1. Some contaminants from scanning are filtered on the",
"0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > 0.02)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog)",
"RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: age<7.5 and rp<15.5 Comments:",
"MWM_YSO_CMZ_Carton old shorthand name: mwm_yso_cmz Simplified Description of selection criteria: selection of sources",
".join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id,",
"query_region[1], query_region[2]))) return query class MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, BOSS Shorthand",
"a subset of 2MASS # 2MASS is a subset of TIC_v8 # Gaia_DR2",
"# (b>-5 or l>180) and b<-5 # S2_5 query below has the same",
"Gaia_DR2.phot_g_mean_flux_over_error, 0.95), peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error) * 5 + 11 < Gaia_DR2.phot_bp_mean_mag -",
"SQL (optional): Implementation: age<7.5 and h<13 \"\"\" name = 'mwm_yso_cluster_apogee' category = 'science'",
"G, BP, RP, J, H, K, parallax cadence options for these targets: boss_bright_3x1",
"glon (galactic longitude) b is glat (galactic latitude) All four statements below are",
".where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs - Cluster BOSS",
"not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set instrument = '\"",
"query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),",
"TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER,",
"l>180) and b<-5 # Replace (b>-5 or l>180) and b<-5 as below based",
"than rp<15.5, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should have",
"not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set instrument = '\"",
"on=(MIPSGAL.twomass_name == TwoMassPSC.designation)) .join(TIC_v8, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id == TIC_v8.gaia_int)) .switch(TIC_v8)",
"needed: 2mass, gaia Additional cross-matching needed: Return columns: Gaia id, 2mass id, G,",
"than one): Pseudo SQL (optional): Implementation: phot_g_mean_mag < 18.5 and h_m <13 and",
"clustered sources from the catalog of clustered structures, with age<7.5 dex and brighter",
"at the tip of the main sequence, brighter than H<13, G<18 mag, closer",
"# @Author: <NAME> (<EMAIL>) # @Date: 2020-06-10 # @Filename: mwm_yso.py # @License: BSD",
"(m.glat > -1 and m.glat < 1) and Sources are within 2 degrees",
"MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs - Cluster BOSS Catalog Shorthand name: mwm_yso_cluster_boss old class name: MWM_YSO_Cluster_Carton",
"+ \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" +",
"H<13, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should have ~8.7K",
"target will receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and (w2mpro-w3mpro>4",
"and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and",
"> 0.50, (AllWise.w2mpro - AllWise.w3mpro) > 1.00, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, (AllWise.w3mpro",
"---> postgres table name # Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' #",
"somehow misses half the sources. Selection was done on the allwise catalog that",
"= (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m,",
"id, allwise id, G, BP, RP, J, H, K, W1, W2, W3, W4,parallax",
"if RP<15.29 | boss_bright_6x1 if RP<15.5 Implementation: (in sagitta | in zari18pms) &",
"'bright_5x1' elif(current_rp < 15.5): current_instrument = 'BOSS' current_cadence = 'bright_6x1' else: # All",
".join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs -",
"from \" + \" sandbox.temp_mwm_yso_disk_boss ;\") output = cursor.fetchall() for i in range(len(output)):",
"> -1 and m.glat < 1) and Sources are within 2 degrees in",
"selection criteria: Selecting the clustered sources from the catalog of vetted pre-main sequence",
"and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 \"\"\" name = 'mwm_yso_ob_apogee' category = 'science'",
"\" update sandbox.temp_mwm_yso_ob_boss \" + \" set cadence = '\" + current_cadence +",
"< 1), \"\"\" name = 'mwm_yso_cmz_apogee' category = 'science' instrument = 'APOGEE' cadence",
"build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'),",
"from Cluster to request BOSS observations, assigning cadence and faint limit for carton",
"Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Disk_BOSS_Carton(BaseCarton): \"\"\"YSOs - Disk",
"RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and phot_g_mean_mag < 18.5 and h_m <13",
"gaia_dr2_source via source_id. # # table catalogdb.yso_clustering # Foreign-key constraints: # \"yso_clustering_source_id_fkey\" FOREIGN",
"| (Gaia_DR2.phot_g_mean_mag >> None), (AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.0, (AllWise.h_m_2mass - AllWise.k_m_2mass) >",
"t.designation left outer join gaia_dr2_source g on g.source_id = tic.gaia_int join catalog_to_tic_v8 ct",
"AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2,",
".join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise ==",
"== TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True,",
"is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set instrument =",
"# yso_clustering is a subset of gaia and # can be joined to",
"- AllWise.w3mpro) > 4) & (AllWise.w4mpro >> None)) | ((AllWise.w3mpro >> None) &",
"cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" +",
"on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) #",
"< 15.5)) # | is for peewee SQL union query = query1 |",
"True, Gaia_DR2.phot_rp_mean_mag < 15.5, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3,",
"Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: catalogdb.sagitta, catalogdb.zari18pms Return columns: Gaia id,",
"W4 with W2-W3>4, or saturated W3 and W2, with J-H>1.1. Some contaminants from",
"not get here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_ob_boss",
"in range(len(output)): current_catalogid = output[i][0] current_rp = output[i][1] if(current_rp < 14.76): current_instrument =",
"None)))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2])))",
"name: mwm_yso_nebula_apogee old class name: MWM_YSO_S2_5_Carton old shorthand name: mwm_yso_s2_5 Simplified Description of",
"= self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_variable_boss ;\") output =",
"where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\" YSOs -",
".join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton): \"\"\"YSOs -",
"Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j, YSO_Clustering.h, YSO_Clustering.k, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id ==",
"Implementation: Hmag<13 and _8_0_-_24_>2.5 and (parallax<0.2 or parallax is null) For CMZ, the",
"MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified Description of selection criteria: selection of YSOs",
"will receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and (w2mpro-w3mpro>4 and",
"str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \"",
"from the galactic center, (MIPSGAL.glon > 358) | (MIPSGAL.glon < 2), (MIPSGAL.glat >",
"if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5",
"raise TargetSelectionError('error in mwm_yso_cluster_boss ' + 'post_process(): ' + 'instrument = None, cadence=",
"== Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id",
".join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs -",
"parallax>0.3 Comments: Split from mwm_yso_s1 to request BOSS observations, same color selection but",
"catalogdb.gaia_dr2_source. # # All values of TIC_v8.plx (for non-null entries) are not the",
"to the previous selection \"\"\" name = 'mwm_yso_variable_boss' category = 'science' instrument =",
"< 2) and (m.glat > -1 and m.glat < 1) and Sources are",
"== version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp",
"+ 'post_process(): ' + 'instrument = None, cadence= None') if current_instrument is not",
"apogee_bright_3x1 (for 7 < H < 13) Implementation: (in sagitta | in zari18pms)",
"5 deg of the plane+ # few sources that can be # located",
"TargetSelectionError('error in mwm_yso_pms_boss ' + 'post_process(): ' + 'instrument = None, cadence= None')",
"= 'MWM' priority = 2700 def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid,",
".join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query def post_process(self, model): \"\"\"",
"All values of TIC_v8.plx (for non-null entries) are not the same as #",
"AllWise.h_m_2mass) > 1.1)), ((Gaia_DR2.b > -5) & (Gaia_DR2.b < 5)) | ((Gaia_DR2.b <",
"remove the join with Catalog in all the cartons # since catalogid is",
"phot_g_mean_mag-5*(log10(1000/parallax)-1) and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error< power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95)",
"and brighter than H<13 (should have ~21.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source",
"and (phot_g_mean_mag>18.5 or phot_g_mean_mag is null) and j_m-h_m>1 and h_m-ks_m>0.5 and w1mpro-w2mpro>0.5 and",
"(pre-)Main Sequence. Shorthand name: mwm_yso_ob_boss old class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob",
"8.0, 24 mag cadence options for these targets (list all options, even though",
"if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set",
"BOSS Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_boss old class name: MWM_YSO_OB_Carton old shorthand",
">> True, YSO_Clustering.h < 13, YSO_Clustering.age < 7.5)) if query_region: query = (query",
"== Gaia_DR2.source_id)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True,",
".join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs -",
"MWM_YSO_S2_Carton old shorthand name: mwm_yso_s2 Simplified Description of selection criteria: selection of YSOs,",
"of selection criteria: selection of YSOs, brighter than H<13, fainter than G>15 or",
"import TargetSelectionError # See catalog.py for the name of peewee model names corresponding",
"CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # | is for peewee SQL union",
"age<7.5 and h<13 \"\"\" name = 'mwm_yso_cluster_apogee' category = 'science' instrument = 'APOGEE'",
"TwoMassPSC.h_m < 13, (Gaia_DR2.phot_g_mean_mag > 18.5) | (Gaia_DR2.phot_g_mean_mag >> None), (AllWise.j_m_2mass - AllWise.h_m_2mass)",
"(AllWise.h_m_2mass - AllWise.k_m_2mass) > 0.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.50, (AllWise.w2mpro - AllWise.w3mpro)",
"one): apogee_bright_3x1 (for 7 < H < 13) Implementation: (in sagitta | in",
"observations, RP magnitude check added to the previous selection \"\"\" name = 'mwm_yso_variable_boss'",
"15.5, (Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag -",
"counterpart in 2MASS, and all 2MASS have an entry in TIC, but not",
"more than one): boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if",
"model): \"\"\" cadence options for these targets: boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if",
"from mwm_yso_ob to request BOSS observations, assigning cadence and faint limit for carton",
"query_region[2]))) return query class MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs - OB BOSS Upper (pre-)Main Sequence. Shorthand",
"and parallax>0.3 \"\"\" name = 'mwm_yso_ob_apogee' category = 'science' instrument = 'APOGEE' cadence",
"> 0.3)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1],",
"(MIPSGAL.glon > 358) | (MIPSGAL.glon < 2), (MIPSGAL.glat > -1) & (MIPSGAL.glat <",
"180)) # l, b in Gaia_DR2 are gallong and gallat in TIC_v8. #",
"(AllWise.w2mpro - AllWise.w3mpro) > 1.00, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, (AllWise.w3mpro - AllWise.w4mpro)",
"REFERENCES twomass_psc(designation) # # Due to below, we do not need a between",
"(AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m,",
"return query class MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs - OB BOSS Upper (pre-)Main Sequence. Shorthand name:",
"than one): Pseudo SQL (optional): Implementation: h_m<13 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5",
".join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))",
"https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Disk APOGEE (IR excess). Shorthand name: mwm_yso_disk_apogee old",
"if l>180 # Hence: # ((b>-5) and (b<5)) or ((b<-5) and (l >",
"Pseudo SQL (optional): Implementation: h_m<13 and (w2mpro-w3mpro>4 and w4mpro is null) or (w3mpro",
"from target_selection.cartons import BaseCarton from target_selection.exceptions import TargetSelectionError # See catalog.py for the",
"# mipsgal is a subset of 2MASS # 2MASS is a subset of",
"cross-matching needed: Return columns: Gaia id, 2mass id, G, BP, RP, J, H,",
"resulting selection was crossmatched against against Gaia with 1\" search radius. Return columns:",
"id, 2mass id, G, BP, RP, J, H, K, parallax cadence options for",
"Gaia_DR2.parallax. # Hence, in the above query, we cannot use TIC_v8.plx instead #",
"< 13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp * 2.5 + 2.5 > Gaia_DR2.phot_g_mean_mag -",
"located further south of the plane if l>180 # Hence: # ((b>-5) and",
"in mwm_yso_variable_boss ' + 'post_process(): ' + 'instrument = None, cadence= None') if",
"\"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_disk_boss ;\")",
"page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: catalogdb.sagitta, catalogdb.zari18pms Return columns: Gaia id, 2mass",
"diagram to select cool pre-main sequence stars, with BP-RP>13, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G, requiring variability",
"TIC_v8 # # 2MASS is not a subset of Gaia_DR2 # Gaia_DR2 is",
"the plane of the sky: all the targets should be within 5 deg",
"target will receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and (phot_g_mean_mag>18.5",
"a subset of TIC_v8 # Gaia_DR2 is a subset of TIC_v8 # #",
"current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set instrument",
"-*- # # @Author: <NAME> (<EMAIL>) # @Date: 2020-06-10 # @Filename: mwm_yso.py #",
"and (l > 180)) # l, b in Gaia_DR2 are gallong and gallat",
"TIC_v8.id)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id,",
"(MIPSGAL.mag_8_0 - MIPSGAL.mag_24) > 2.5, (Gaia_DR2.parallax < 0.2) | (Gaia_DR2.parallax >> None))) if",
"(AllWise.w3mpro - AllWise.w4mpro) > 1.50, (AllWise.w3mpro - AllWise.w4mpro) > (AllWise.w1mpro - AllWise.w2mpro) *",
"APOGEE(optically invisible, WISE saturated). Shorthand name: mwm_yso_nebula_apogee old class name: MWM_YSO_S2_5_Carton old shorthand",
"== version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m <",
"then the resulting selection was crossmatched against against Gaia with 1\" search radius.",
"> Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp * 2.5",
"twomass_psc t on twomass_name = designation join tic_v8 tic on tic.twomass_psc = t.designation",
"peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error <",
"a subset of Gaia_DR2 # Gaia_DR2 is not a subset of 2MASS #",
"receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and (w2mpro-w3mpro>4 and w4mpro",
"BP-RP>13, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G, requiring variability in g,bp,rp>0.02 (with var_x defined as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have",
"mwm_yso_cluster_boss ' + 'post_process(): ' + 'instrument = None, cadence= None') if current_instrument",
"# can be joined to gaia_dr2_source via source_id. # # table catalogdb.yso_clustering #",
"Filter on the position of the HR diagram to select cool pre-main sequence",
"query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),",
"invisible). Shorthand name: mwm_yso_embedded_apogee old class name: MWM_YSO_S2_Carton old shorthand name: mwm_yso_s2 Simplified",
"S2_5 query below has the same part before where() as S2 query. def",
"selection criteria: Selecting the clustered sources from the catalog of clustered structures, with",
"- 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp > 1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /",
"if RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: age<7.5 and rp<15.5",
"instead of H \"\"\" name = 'mwm_yso_cluster_boss' category = 'science' instrument = None",
"and # can be joined to gaia_dr2_source via source_id. # # table catalogdb.yso_clustering",
"CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # | is for peewee SQL union",
"target will receive more than one): Pseudo SQL (optional): Implementation: phot_g_mean_mag < 18.5",
"= 'MWM' priority = 2700 # mipsgal is a subset of 2MASS #",
"(optional): Implementation: phot_g_mean_mag < 18.5 and h_m <13 and parallax >0.3 and bp_rp*2.5+2.5",
"(MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5, MIPSGAL.mag_5_8, MIPSGAL.mag_8_0,",
"in the above query, we cannot use TIC_v8.plx instead # of Gaia_DR2.parallax. if",
"power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11< phot_bp_mean_mag-5*(log10(1000/parallax)-1) and bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments:",
"not need a between to Catalog and CatalogToTIC_v8 # Catalog.catalogid == CatalogToTIC_v8.catalogid #",
"Catalog doesn't give us anything extra and it's a costly join. def build_query(self,",
"get here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_cluster_boss '",
"query_region[0], query_region[1], query_region[2]))) return query def post_process(self, model): \"\"\" cadence options for these",
"(should have ~21.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Gaia, 2mass,",
"parallax>0.3. Filter on the position of the HR diagram to select cool pre-main",
"a costly join. def build_query(self, version_id, query_region=None): query = (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'),",
"than one): 'apogee_bright_3x1' Pseudo SQL (optional): Implementation: Hmag<13 and _8_0_-_24_>2.5 and (parallax<0.2 or",
"page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Gaia, 2mass, allwise Additional cross-matching needed: Note:",
"+ \";\") class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Embedded APOGEE (optically invisible). Shorthand name: mwm_yso_embedded_apogee",
"# All cases should be covered above so we should not get here.",
"Note: Using the Gaia xmatch somehow misses half the sources. Selection was done",
"CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro -",
"'bright_4x1' elif(current_rp < 15.29): current_instrument = 'BOSS' current_cadence = 'bright_5x1' elif(current_rp < 15.5):",
"sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Kounkel+20 clustered catalog Additional cross-matching",
"Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # join with",
"criteria: Selecting the clustered sources from the catalog of clustered structures, with age<7.5",
"and b<-5 # S2_5 query below has the same part before where() as",
"optical variables). Shorthand name: mwm_yso_variable_apogee old class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3",
"G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should have ~8.7K sources)",
"mwm_yso_ob_apogee old class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified Description of selection",
"BaseCarton from target_selection.exceptions import TargetSelectionError # See catalog.py for the name of peewee",
"18.5 and h_m <13 and parallax >0.3 and bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and bp_rp*2.5-1",
"in b from the galactic center, (MIPSGAL.glon > 358) | (MIPSGAL.glon < 2),",
"== Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # |",
"= 'bright_5x1' elif(current_rp < 15.5): current_instrument = 'BOSS' current_cadence = 'bright_6x1' else: #",
"J-H>1.1. Some contaminants from scanning are filtered on the plane of the sky:",
">> True, TwoMassPSC.h_m < 13, (Gaia_DR2.phot_g_mean_mag > 18.5) | (Gaia_DR2.phot_g_mean_mag >> None), (AllWise.j_m_2mass",
"peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error) * 5 + 11 < Gaia_DR2.phot_bp_mean_mag - 5 * (peewee.fn.log(1000",
"parallax>0.3 \"\"\" name = 'mwm_yso_ob_apogee' category = 'science' instrument = 'APOGEE' cadence =",
"# # 2MASS is not a subset of Gaia_DR2 # Gaia_DR2 is not",
"Implementation: h_m<13 and bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) <",
"and M_G<(BP-RP)*1.6-2.2 (should have ~8.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed:",
"xmatch included Return columns: mipsgal id, 2mass id, j, h, k, 3.6, 4.8,",
"self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set instrument = '\" + current_instrument",
"null) and j_m-h_m>1 and h_m-ks_m>0.5 and w1mpro-w2mpro>0.5 and w2mpro-w3mpro>1 and w3mpro-w4mpro>1.5 and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1",
"None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \" set cadence = '\" +",
"(optional): Implementation: h_m<13 and bp_rp between -0.2 and 1.1 and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1)",
"outer join gaia_dr2_source g on g.source_id = tic.gaia_int join catalog_to_tic_v8 ct on ct.target_id",
"Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5, MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24, MIPSGAL.hmag, Gaia_DR2.parallax,",
"Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error * 0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error",
"+ \"'\" \" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton):",
"than one): Pseudo SQL (optional): Implementation: h_m<13 and (w2mpro-w3mpro>4 and w4mpro is null)",
"Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Variable APOGEE (pre-main",
"< Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error",
"are filtered on the plane of the sky: all the targets should be",
"Removed below condition. l is glon (galactic longitude) b is glat (galactic latitude)",
"(galactic latitude) All four statements below are equivalent. (l> 358 or l< 2)",
"2) and (m.glat > -1 and m.glat < 1) and Sources are within",
"and brighter than H<13 mag. (should have ~45.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional",
"class name: MWM_YSO_S2_5_Carton old shorthand name: mwm_yso_s2_5 Simplified Description of selection criteria: selection",
"= 'mwm_yso' mapper = 'MWM' priority = 2700 # yso_clustering is a subset",
"- Cluster BOSS Catalog Shorthand name: mwm_yso_cluster_boss old class name: MWM_YSO_Cluster_Carton old shorthand",
"id, 2mass id, j, h, k, 3.6, 4.8, 8.0, 24 mag cadence options",
"TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8)",
"mwm_yso_cmz, removed check on the position on the sky: Removed below condition. l",
"None) & (AllWise.w4mpro >> None) & ((AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.1)), ((Gaia_DR2.b >",
"= None raise TargetSelectionError('error in mwm_yso_ob_boss ' + 'post_process(): ' + 'instrument =",
"\" + str(current_catalogid) + \";\") class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Embedded APOGEE (optically invisible).",
"2MASS # 2MASS is a subset of TIC_v8 # Gaia_DR2 is a subset",
"2700 # mipsgal is a subset of 2MASS # mipsgal can be joined",
"w3mpro-w4mpro>1.5 and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\" name = 'mwm_yso_embedded_apogee' category = 'science' instrument = 'APOGEE'",
"\" sandbox.temp_mwm_yso_cluster_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0]",
"True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # | is for peewee SQL union query =",
"4) & (AllWise.w4mpro >> None)) | ((AllWise.w3mpro >> None) & (AllWise.w4mpro >> None)",
"mwm_yso_variable_boss old class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified Description of selection",
"use TIC_v8.plx instead # of Gaia_DR2.parallax. if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog)",
"and rp<15.5 Comments: Split from Cluster to request BOSS observations, assigning cadence and",
"W4,parallax cadence options for these targets (list all options, even though no single",
"where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Central",
"and bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>",
"self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set cadence = '\" + current_cadence",
"implementation has below clause # and (b>-5 or l>180) and b<-5 # Replace",
"or l>180) and b<-5 # S2_5 query below has the same part before",
"to request BOSS observations, RP magnitude check added to the previous selection \"\"\"",
"# # table catalogdb.mipsgal # Foreign-key constraints: # \"twomass_name_fk\" FOREIGN KEY (twomass_name) #",
"mwm_yso_pms_apogee Comments: New Simplified Description of selection criteria: Selecting the clustered sources from",
"# Gaia_DR2 pweewee model class corresponds to # table catalogdb.gaia_dr2_source. # # All",
"counterpart). Comments: Formerly mwm_yso_cmz, removed check on the position on the sky: Removed",
"on IR excess, with WISE colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5, closer than parallax>0.3, and",
"-1) & (MIPSGAL.glat < 1), \"\"\" name = 'mwm_yso_cmz_apogee' category = 'science' instrument",
"We are using the values from Gaia since # TIC propagates the coordinates",
"criteria: selection of sources in the central molecular zone based on spitzer fluxes",
"should be within 5 deg of the plane+ few sources that can be",
"- Disk APOGEE (IR excess). Shorthand name: mwm_yso_disk_apogee old class name: MWM_YSO_S1_Carton old",
"2), (MIPSGAL.glat > -1) & (MIPSGAL.glat < 1), \"\"\" name = 'mwm_yso_cmz_apogee' category",
"the tip of the main sequence, brighter than H<13, G<18 mag, closer than",
"| boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and phot_g_mean_mag < 18.5",
"AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro) > 0.50, (AllWise.w3mpro - AllWise.w4mpro) > 1.50,",
"targets should be within 5 deg of the plane+ few sources that can",
"\"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_ob_boss ;\")",
"\" set instrument = '\" + current_instrument + \"'\" \" where catalogid =",
"phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 Comments: Split from mwm_yso_ob to request",
"mwm_yso_nebula_apogee old class name: MWM_YSO_S2_5_Carton old shorthand name: mwm_yso_s2_5 Simplified Description of selection",
"TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag",
"subset of TIC_v8 # # 2MASS is not a subset of Gaia_DR2 #",
"TargetSelectionError # See catalog.py for the name of peewee model names corresponding #",
"Pseudo SQL (optional): Implementation: age<7.5 and rp<15.5 Comments: Split from Cluster to request",
"- AllWise.k_m_2mass) > 0.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.50, (AllWise.w2mpro - AllWise.w3mpro) >",
"# We can remove the join with Catalog in all the cartons #",
"- OB BOSS Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_boss old class name: MWM_YSO_OB_Carton",
"old shorthand name: mwm_yso_ob Simplified Description of selection criteria: Selecting the OB stars",
"class MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Variable APOGEE (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_apogee",
"# few sources that can be # located further south of the plane",
"on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2)",
"mwm_yso_cmz_apogee old class name: MWM_YSO_CMZ_Carton old shorthand name: mwm_yso_cmz Simplified Description of selection",
"class MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Disk APOGEE (IR excess). Shorthand name: mwm_yso_disk_apogee old class",
"and phot_g_mean_mag<18 and phot_g_mean_mag-5*(log10(1000/parallax)-1) < 1.6*bp_rp-2.2 and parallax>0.3 \"\"\" name = 'mwm_yso_ob_apogee' category",
"AllWise.w2mpro) * 0.8 + 1.1)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra,",
"m.hmag < 13 and (m.mag_8_0 - m.mag_24) > 2.5 and (g.parallax < 0.2",
"source catalogs needed: mipsgal Additional cross-matching needed: the table has xmatch included Return",
"on the allwise catalog that had 2mass photometry, and then the resulting selection",
"Shorthand name: mwm_yso_disk_boss old class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified Description",
"& (Gaia_DR2.bp_rp < 1.1), Gaia_DR2.phot_g_mean_mag < 18, Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 /",
"selection \"\"\" name = 'mwm_yso_variable_boss' category = 'science' instrument = None # instrument",
"within 2 degrees in l and 1 degree in b from the galactic",
"target will receive more than one): Pseudo SQL (optional): Implementation: age<7.5 and h<13",
"15.5, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp * 2.5",
"parallax >0.3 and bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error",
">> True, Gaia_DR2.phot_rp_mean_mag < 15.5, YSO_Clustering.age < 7.5)) if query_region: query = (query",
"Nebula APOGEE(optically invisible, WISE saturated). Shorthand name: mwm_yso_nebula_apogee old class name: MWM_YSO_S2_5_Carton old",
"((b>-5) and (b<5)) or ((b<-5) and (l > 180)) # l, b in",
"# so the join with Catalog doesn't give us anything extra and it's",
"> 180)) # l, b in Gaia_DR2 are gallong and gallat in TIC_v8.",
"the position of the HR diagram to select cool pre-main sequence stars, with",
"Catalog in all the cartons # since catalogid is completely unique (even across",
"1) < 1.6 * Gaia_DR2.bp_rp - 2.2, Gaia_DR2.parallax > 0.3)) if query_region: query",
"\"\"\" # peewee Model name ---> postgres table name # Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms'",
"13)) # | is for peewee SQL union query = query1 | query2",
"source catalogs needed: 2mass+allwise, gaia (allow sources that lack gaia xmatch) Additional cross-matching",
"raise TargetSelectionError('error in mwm_yso_pms_boss ' + 'post_process(): ' + 'instrument = None, cadence=",
"of the plane if l>180 # Hence: # ((b>-5) and (b<5)) or ((b<-5)",
".switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m <",
"added to the previous selection \"\"\" name = 'mwm_yso_variable_boss' category = 'science' instrument",
"sandbox.temp_mwm_yso_cluster_boss \" + \" set instrument = '\" + current_instrument + \"'\" \"",
"Catalog and CatalogToTIC_v8 # Catalog.catalogid == CatalogToTIC_v8.catalogid # We can remove the join",
"current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_variable_boss ' + 'post_process():",
"# 2MASS is a subset of TIC_v8 # Gaia_DR2 is a subset of",
"not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set cadence = '\"",
"(allow sources that lack gaia xmatch) Additional cross-matching needed: Note: Using the Gaia",
"# Gaia_DR2 is a subset of TIC_v8 # # 2MASS is not a",
"(m.mag_8_0 - m.mag_24) > 2.5 and (g.parallax < 0.2 or g.parallax is null)",
"version_id, CatalogToTIC_v8.best >> True, YSO_Clustering.h < 13, YSO_Clustering.age < 7.5)) if query_region: query",
"on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >>",
"not get here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_disk_boss",
"Catalog Shorthand name: mwm_yso_cluster_apogee old class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified",
"TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag",
"RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 \"\"\" cursor = self.database.execute_sql(",
"Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_g_mean_mag < 18.5, TwoMassPSC.h_m < 13,",
"13, YSO_Clustering.age < 7.5)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec,",
"str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \"",
"CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # join with Zari18pms query2 = (CatalogToTIC_v8",
"select ct.catalogid from mipsgal m join twomass_psc t on twomass_name = designation join",
"via # mipsgal.twomass_name = TwoMassPSC.designation. # Then join via TIC and catalog_to_tic. #",
"> 0.3)) # Gaia_DR2 pweewee model class corresponds to # table catalogdb.gaia_dr2_source. #",
"postgres table name # Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc'",
"and b<-5 # Replace (b>-5 or l>180) and b<-5 as below based on",
"Sagitta query1 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),",
"w2mpro-w3mpro>1 and w3mpro-w4mpro>1.5 and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1 \"\"\" name = 'mwm_yso_embedded_apogee' category = 'science' instrument",
"\" + \" sandbox.temp_mwm_yso_variable_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid",
"of H \"\"\" name = 'mwm_yso_disk_boss' category = 'science' instrument = None #",
"catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_disk_boss ;\") output = cursor.fetchall() for i",
"is a subset of TIC_v8 # Gaia_DR2 is a subset of TIC_v8 #",
"with Catalog in all the cartons # since catalogid is completely unique (even",
";\") output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0] current_rp =",
"TIC propagates the coordinates back to epoch 2000.0 # (b>-5 or l>180) and",
"selection criteria: selection of sources in the central molecular zone based on spitzer",
"# table catalogdb.mipsgal # Foreign-key constraints: # \"twomass_name_fk\" FOREIGN KEY (twomass_name) # REFERENCES",
"here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_variable_boss ' +",
"will receive more than one): Pseudo SQL (optional): Implementation: age<7.5 and h<13 \"\"\"",
"than H<13 (should have ~21.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed:",
"of TIC_v8 # # 2MASS is not a subset of Gaia_DR2 # Gaia_DR2",
"subset of gaia and # can be joined to gaia_dr2_source via source_id. #",
"sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 Comments: Split from mwm_yso_s3 to request BOSS observations,",
"Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation)) .join(TwoMassPSC, on=(TIC_v8.twomass_psc ==",
"main sequence, brighter than rp<15.5, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and",
"brighter than H<13, have color 8.0-24>2.5, and have parallax<0.2 or lack a Gaia",
"'MWM' priority = 2700 # Above implementation has below clause # and (b>-5",
"query below has the same part before where() as S2 query. def build_query(self,",
"is not a subset of 2MASS # # table catalogdb.mipsgal # Foreign-key constraints:",
"TIC_v8.id)) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id",
"based on RP instead of H \"\"\" name = 'mwm_yso_disk_boss' category = 'science'",
"source catalogs needed: catalogdb.sagitta, catalogdb.zari18pms Return columns: Gaia id, 2mass id, G, BP,",
"(CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m,",
"same as # values of Gaia_DR2.parallax. # Hence, in the above query, we",
".join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5,",
"Molecular Zone APOGEE. Shorthand name: mwm_yso_cmz_apogee old class name: MWM_YSO_CMZ_Carton old shorthand name:",
"< 2), (MIPSGAL.glat > -1) & (MIPSGAL.glat < 1), \"\"\" name = 'mwm_yso_cmz_apogee'",
"CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # join with Zari18pms query2 = (CatalogToTIC_v8",
"Return columns: mipsgal id, 2mass id, j, h, k, 3.6, 4.8, 8.0, 24",
"boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and",
"New Simplified Description of selection criteria: Selecting the clustered sources from the catalog",
"'mwm_yso_pms_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso'",
"joined to twomass_psc via # mipsgal.twomass_name = TwoMassPSC.designation. # Then join via TIC",
"W1-W2>0.5, W2-W3>1, W3-W4>1.5, and relates (W3-W4)>(W1-W2)*0.5+1.1 (should have ~11.6K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function",
"- 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1) < 1.6 * Gaia_DR2.bp_rp -",
"Return columns: Gaia id, 2mass id, G, BP, RP, J, H, K, parallax",
"BOSS observations, assigning cadence and faint limit for carton based on RP instead",
"not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set instrument = '\"",
"== CatalogToTIC_v8.catalogid # We can remove the join with Catalog in all the",
"Additional source catalogs needed: 2mass, gaia Additional cross-matching needed: Return columns: Gaia id,",
"name = 'mwm_yso_cluster_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program",
"shorthand name: mwm_yso_s2_5 Simplified Description of selection criteria: selection of YSOs, brighter than",
"join twomass_psc t on twomass_name = designation join tic_v8 tic on tic.twomass_psc =",
"same part before where() as S2 query. def build_query(self, version_id, query_region=None): query =",
"join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag,",
"/ Gaia_DR2.phot_bp_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > 0.02)) if query_region: query =",
"glat (galactic latitude) All four statements below are equivalent. (l> 358 or l<",
"current_instrument + \"'\" \" where catalogid = \" + str(current_catalogid) + \";\") if",
"\" update sandbox.temp_mwm_yso_ob_boss \" + \" set instrument = '\" + current_instrument +",
"Shorthand name: mwm_yso_cluster_boss old class name: MWM_YSO_Cluster_Carton old shorthand name: mwm_yso_cluster Simplified Description",
"* 2.5 + 2.5 > Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) -",
"\" + \" sandbox.temp_mwm_yso_disk_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid",
"# # @Author: <NAME> (<EMAIL>) # @Date: 2020-06-10 # @Filename: mwm_yso.py # @License:",
"Implementation: age<7.5 and rp<15.5 Comments: Split from Cluster to request BOSS observations, assigning",
"name = 'mwm_yso_variable_boss' category = 'science' instrument = None # instrument is set",
"_8_0_-_24_>2.5 and (parallax<0.2 or parallax is null) For CMZ, the raw sql query",
"TIC and catalog_to_tic. # # mipsgal is a subset of 2MASS # 2MASS",
"== TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))",
"BP, RP, J, H, K, W1, W2, W3, W4,parallax cadence options for these",
"MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Variable APOGEE (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_apogee old",
"Kounkel+20 clustered catalog Additional cross-matching needed: Return columns: Gaia id, 2mass id, G,",
"Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: mipsgal Additional cross-matching needed: the table",
"= None, cadence= None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss",
"Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs - OB",
"= 'mwm_yso_nebula_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program =",
"== version_id, CatalogToTIC_v8.best >> True, YSO_Clustering.h < 13, YSO_Clustering.age < 7.5)) if query_region:",
"sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL, AllWise, Catalog, CatalogToTIC_v8, Gaia_DR2, Sagitta, TIC_v8, TwoMassPSC, YSO_Clustering, Zari18pms) from",
"1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs)",
"# @Date: 2020-06-10 # @Filename: mwm_yso.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import peewee",
"= 'mwm_yso_variable_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program =",
"(optically invisible). Shorthand name: mwm_yso_embedded_apogee old class name: MWM_YSO_S2_Carton old shorthand name: mwm_yso_s2",
"None raise TargetSelectionError('error in mwm_yso_variable_boss ' + 'post_process(): ' + 'instrument = None,",
"0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.95), peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /",
"if RP<15.5 Implementation: (in sagitta | in zari18pms) & rp<15.5 lead contact:<NAME> \"\"\"",
"query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_BOSS_Carton(BaseCarton): \"\"\"YSOs - Variable BOSS (pre-main sequence",
"version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # join with Zari18pms query2 =",
"catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_ob_boss ;\") output = cursor.fetchall() for i",
"cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_disk_boss ;\") output",
"here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_disk_boss ' +",
"build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,",
"priority = 2700 def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'),",
"catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_pms_boss ;\") output = cursor.fetchall() for i",
"program = 'mwm_yso' mapper = 'MWM' priority = 2700 def build_query(self, version_id, query_region=None):",
"peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > 0.02)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra,",
"raise TargetSelectionError('error in mwm_yso_disk_boss ' + 'post_process(): ' + 'instrument = None, cadence=",
"union query = query1 | query2 if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog)",
">0.3 and bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error> sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and",
"> -1) & (MIPSGAL.glat < 1), \"\"\" name = 'mwm_yso_cmz_apogee' category = 'science'",
"not get here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_cluster_boss",
".select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m,",
"gaia Additional cross-matching needed: Return columns: Gaia id, 2mass id, G, BP, RP,",
"needed: Return columns: 2mass id, allwise id, J, H, K, W1, W2, W3,",
"coding: utf-8 -*- # # @Author: <NAME> (<EMAIL>) # @Date: 2020-06-10 # @Filename:",
"even though no single target will receive more than one): cadence options for",
"| ((Gaia_DR2.b >> None) & (Gaia_DR2.l >> None)))) if query_region: query = (query",
"RP<15.29 | boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: phot_rp_mean_mag<15.5 and phot_g_mean_mag <",
"None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \" set instrument = '\" +",
"w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 \"\"\" name = 'mwm_yso_disk_apogee' category =",
"have parallax<0.2 or lack a Gaia xmatch. (should have ~3.2K sources) Wiki page:",
"See catalog.py for the name of peewee model names corresponding # to postgres",
"tic.gaia_int join catalog_to_tic_v8 ct on ct.target_id = tic.id where m.hmag < 13 and",
"= None raise TargetSelectionError('error in mwm_yso_pms_boss ' + 'post_process(): ' + 'instrument =",
"excess). Shorthand name: mwm_yso_disk_apogee old class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified",
"Sources are within 2 degrees in l and 1 degree in b from",
"\"\"\" name = 'mwm_yso_disk_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1'",
"CatalogToTIC_v8.catalogid # We can remove the join with Catalog in all the cartons",
"name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified Description of selection criteria: selection of",
"Variable APOGEE (pre-main sequence optical variables). Shorthand name: mwm_yso_variable_apogee old class name: MWM_YSO_S3_Carton",
"one): boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29 |",
"query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs - Cluster BOSS Catalog Shorthand",
"== version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # join with Zari18pms query2",
"| boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 \"\"\"",
"catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_cluster_boss ;\") output = cursor.fetchall() for i",
"str(current_catalogid) + \";\") class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Central Molecular Zone APOGEE. Shorthand name:",
"Additional source catalogs needed: 2mass, allwise Additional cross-matching needed: Return columns: 2mass id,",
"join. def build_query(self, version_id, query_region=None): query = (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'),",
"outer join between TIC and Gaia (all MIPSGAL targets have a counterpart in",
"cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_pms_boss ;\") output",
"g,bp,rp>0.02 (with var_x defined as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have relations in variability of var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95,",
"~52.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, gaia Additional cross-matching",
"priority = 2700 # yso_clustering is a subset of gaia and # can",
"rp<15.5 Comments: Split from Cluster to request BOSS observations, assigning cadence and faint",
"peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error * 0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs)",
"TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5, MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24, MIPSGAL.hmag, Gaia_DR2.parallax, MIPSGAL.glon, MIPSGAL.glat) .join(TwoMassPSC, on=(MIPSGAL.twomass_name ==",
"since catalogid is completely unique (even across different version_id) # so the join",
"Catalog.catalogid == CatalogToTIC_v8.catalogid # We can remove the join with Catalog in all",
"that can be located further south of the plane if l>180 (should have",
"None), (AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.0, (AllWise.h_m_2mass - AllWise.k_m_2mass) > 0.5, (AllWise.w1mpro -",
"(peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp > 1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error > 0.02,",
"name: mwm_yso_variable_apogee old class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified Description of",
"2.5 > Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp *",
"selection was crossmatched against against Gaia with 1\" search radius. Return columns: Gaia",
"saturated). Shorthand name: mwm_yso_nebula_apogee old class name: MWM_YSO_S2_5_Carton old shorthand name: mwm_yso_s2_5 Simplified",
"None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss \" + \"",
"the resulting selection was crossmatched against against Gaia with 1\" search radius. Return",
"- AllWise.w2mpro) > 0.50, (AllWise.w2mpro - AllWise.w3mpro) > 1.00, (AllWise.w3mpro - AllWise.w4mpro) >",
"1 (m.glon > 358 or m.glon < 2) and (m.glat > -1 and",
"\"yso_clustering_source_id_fkey\" FOREIGN KEY (source_id) # REFERENCES gaia_dr2_source(source_id) def build_query(self, version_id, query_region=None): query =",
"k, 3.6, 4.8, 8.0, 24 mag cadence options for these targets (list all",
"'mwm_yso' mapper = 'MWM' priority = 2700 # mipsgal is a subset of",
"< 0.2 or g.parallax is null) and ct.version_id = 13 and ct.best is",
"name: mwm_yso_s1 Simplified Description of selection criteria: selection of YSOs based on IR",
"return query class MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, BOSS Shorthand name: mwm_yso_pms_boss",
"== TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True,",
"Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1) < 1.6 * Gaia_DR2.bp_rp",
"TIC and Gaia (all MIPSGAL targets have a counterpart in 2MASS, and all",
"= None # instrument is set in post_process() cadence = None # cadence",
"is null and j_m-h_m>1.1) and (b>-5 or l>180) and b<-5 \"\"\" name =",
"- 5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1), peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs)",
"will receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and w1mpro-w2mpro>0.25 and",
"be located further south of the plane if l>180 (should have ~1.2K sources)",
"Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_apogee' category = 'science' instrument =",
"class MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence, BOSS Shorthand name: mwm_yso_pms_boss Comments: New,",
"not all the TIC entries have a Gaia counterpart). Comments: Formerly mwm_yso_cmz, removed",
"True, Gaia_DR2.phot_rp_mean_mag < 15.5, YSO_Clustering.age < 7.5)) if query_region: query = (query .join_from(CatalogToTIC_v8,",
"boss_bright_6x1 if RP<15.5 Implementation: (in sagitta | in zari18pms) & rp<15.5 lead contact:<NAME>",
"= None raise TargetSelectionError('error in mwm_yso_disk_boss ' + 'post_process(): ' + 'instrument =",
"the same part before where() as S2 query. def build_query(self, version_id, query_region=None): query",
"query_region[1], query_region[2]))) return query class MWM_YSO_OB_BOSS_Carton(BaseCarton): \"\"\"YSOs - OB BOSS Upper (pre-)Main Sequence.",
"boss_bright_6x1 if RP<15.5 Pseudo SQL (optional): Implementation: age<7.5 and rp<15.5 Comments: Split from",
"Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Cluster_BOSS_Carton(BaseCarton): \"\"\"YSOs - Cluster BOSS Catalog",
".where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Nebula APOGEE(optically",
"absolute mag (should have ~52.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed:",
"AllWise.w3mpro) > 4) & (AllWise.w4mpro >> None)) | ((AllWise.w3mpro >> None) & (AllWise.w4mpro",
"BOSS observations, RP magnitude check added to the previous selection \"\"\" name =",
"MIPSGAL.glat) .join(TwoMassPSC, on=(MIPSGAL.twomass_name == TwoMassPSC.designation)) .join(TIC_v8, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id ==",
"crossmatched against against Gaia with 1\" search radius. Return columns: Gaia id, 2mass",
"corresponding # to postgres table names: # https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Disk",
"fainter than G>15 or without gaia detection, colors J-H>0,5, W1-W2>0.5, W2-W3>1, W3-W4>1.5, and",
"current_cadence = 'bright_6x1' else: # All cases should be covered above so we",
"b<-5 as below based on the text. # In words: # all the",
"cannot use TIC_v8.plx instead # of Gaia_DR2.parallax. if query_region: query = (query .join_from(CatalogToTIC_v8,",
"and b<-5 \"\"\" name = 'mwm_yso_nebula_apogee' category = 'science' instrument = 'APOGEE' cadence",
"priority = 2700 def build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'),",
"0.75, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error",
"cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0] current_rp = output[i][1] if(current_rp <",
"than one): apogee_bright_3x1 (for 7 < H < 13) Implementation: (in sagitta |",
"Shorthand name: mwm_yso_disk_apogee old class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified Description",
"CMZ, the raw sql query would be: select ct.catalogid from mipsgal m join",
"of 2MASS # mipsgal can be joined to twomass_psc via # mipsgal.twomass_name =",
"Description of selection criteria: Selecting the clustered sources from the catalog of clustered",
"clustered structures, with age<7.5 dex and brighter than rp<15.5 mag. Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function",
"with Sagitta query1 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,",
"catalogdb.yso_clustering # Foreign-key constraints: # \"yso_clustering_source_id_fkey\" FOREIGN KEY (source_id) # REFERENCES gaia_dr2_source(source_id) def",
">> None))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1],",
"Simplified Description of selection criteria: selection of YSOs brighter than H<13, closer than",
"tic.twomass_psc = t.designation left outer join gaia_dr2_source g on g.source_id = tic.gaia_int join",
"sources from the catalog of clustered structures, with age<7.5 dex and brighter than",
"# of Gaia_DR2.parallax. if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0],",
".join(TwoMassPSC, on=(MIPSGAL.twomass_name == TwoMassPSC.designation)) .join(TIC_v8, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id == TIC_v8.gaia_int))",
"MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Embedded APOGEE (optically invisible). Shorthand name: mwm_yso_embedded_apogee old class name:",
"180)) | ((Gaia_DR2.b >> None) & (Gaia_DR2.l >> None)))) if query_region: query =",
"Comments: New Simplified Description of selection criteria: Selecting the clustered sources from the",
"J, H, K, parallax cadence options for these targets (list all options, even",
"True, TwoMassPSC.h_m < 13)) # | is for peewee SQL union query =",
"OB BOSS Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_boss old class name: MWM_YSO_OB_Carton old",
"to request BOSS observations, same color selection but assigning cadence and faint limit",
"no single target will receive more than one): cadence options for these targets:",
"catalogs needed: 2mass+allwise, gaia (allow sources that lack gaia xmatch) Additional cross-matching needed:",
"or phot_g_mean_mag is null) and j_m-h_m>1 and h_m-ks_m>0.5 and w1mpro-w2mpro>0.5 and w2mpro-w3mpro>1 and",
"1.6*bp_rp-2.2 and parallax>0.3 \"\"\" name = 'mwm_yso_ob_apogee' category = 'science' instrument = 'APOGEE'",
"# S2_5 query below has the same part before where() as S2 query.",
"Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\" YSOs - Pre-main sequence,",
"+ str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_pms_boss",
"for these targets (list all options, even though no single target will receive",
"KEY (twomass_name) # REFERENCES twomass_psc(designation) # # Due to below, we do not",
"(peewee.fn.log(1000 / Gaia_DR2.parallax) - 1) < 1.6 * Gaia_DR2.bp_rp - 2.2, Gaia_DR2.parallax >",
"target will receive more than one): Pseudo SQL (optional): Implementation: h_m<13 and w1mpro-w2mpro>0.25",
"2mass+allwise, gaia (allow sources that lack gaia xmatch) Additional cross-matching needed: Note: Using",
"rp<15.5, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should have ~8.7K",
"\";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_variable_boss \" + \"",
"Gaia id, 2mass id, allwise id, G, BP, RP, J, H, K, W1,",
"@Date: 2020-06-10 # @Filename: mwm_yso.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import peewee from",
"values from Gaia since # TIC propagates the coordinates back to epoch 2000.0",
"self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_variable_boss ;\") output = cursor.fetchall()",
"Shorthand name: mwm_yso_variable_boss old class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3 Simplified Description",
"catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Embedded APOGEE",
"here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_pms_boss ' +",
"mag (should have ~52.7K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass,",
"previous selection \"\"\" name = 'mwm_yso_variable_boss' category = 'science' instrument = None #",
".switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id,",
"name: mwm_yso_disk_apogee old class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified Description of",
"names: # https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py class MWM_YSO_Disk_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Disk APOGEE (IR excess). Shorthand name:",
"Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), TwoMassPSC.j_m, TwoMassPSC.h_m, TwoMassPSC.k_m, Gaia_DR2.parallax) .join(TIC_v8, on=(CatalogToTIC_v8.target_id",
"https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, allwise Additional cross-matching needed: Return columns: 2mass",
"have a counterpart in 2MASS, and all 2MASS have an entry in TIC,",
"if(current_rp < 14.76): current_instrument = 'BOSS' current_cadence = 'bright_3x1' elif(current_rp < 15.075): current_instrument",
"but assigning cadence and faint limit for carton based on RP instead of",
"cross-matching needed: Return columns: 2mass id, allwise id, J, H, K, W1, W2,",
"the TIC entries have a Gaia counterpart). Comments: Formerly mwm_yso_cmz, removed check on",
"TwoMassPSC.h_m < 13, Gaia_DR2.parallax > 0.3, Gaia_DR2.bp_rp * 2.5 + 2.5 > Gaia_DR2.phot_g_mean_mag",
".where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # join with Zari18pms",
"query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class",
"current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set cadence",
"True, Gaia_DR2.phot_rp_mean_mag < 15.5, (AllWise.w1mpro - AllWise.w2mpro) > 0.25, (AllWise.w2mpro - AllWise.w3mpro) >",
"sky: Removed below condition. l is glon (galactic longitude) b is glat (galactic",
"raw sql query would be: select ct.catalogid from mipsgal m join twomass_psc t",
"2700 # yso_clustering is a subset of gaia and # can be joined",
"h<13 lead contact:<NAME> \"\"\" # peewee Model name ---> postgres table name #",
"# 2MASS is not a subset of Gaia_DR2 # Gaia_DR2 is not a",
"name = 'mwm_yso_disk_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program",
"boss_bright_5x1 if RP<15.29 | boss_bright_6x1 if RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select catalogid,",
"old class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified Description of selection criteria:",
"2mass, allwise Additional cross-matching needed: Note: Using the Gaia xmatch somehow misses half",
"\"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_pms_boss ;\")",
"and w3mpro-w4mpro>1.5 and parallax>0.3 \"\"\" name = 'mwm_yso_disk_apogee' category = 'science' instrument =",
"is a subset of TIC_v8 # # 2MASS is not a subset of",
"m.glat < 1) and Sources are within 2 degrees in l and 1",
"= None raise TargetSelectionError('error in mwm_yso_variable_boss ' + 'post_process(): ' + 'instrument =",
"True, YSO_Clustering.h < 13, YSO_Clustering.age < 7.5)) if query_region: query = (query .join_from(CatalogToTIC_v8,",
"sequence, BOSS Shorthand name: mwm_yso_pms_boss Comments: New, Split from PMS Simplified Description of",
"Gaia counterpart). Comments: Formerly mwm_yso_cmz, removed check on the position on the sky:",
"MIPSGAL targets have a counterpart in 2MASS, and all 2MASS have an entry",
"page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, gaia Additional cross-matching needed: Return columns:",
"of vetted pre-main sequence stars Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: catalogdb.sagitta,",
"gaia xmatch) Additional cross-matching needed: Note: Using the Gaia xmatch somehow misses half",
"-*- coding: utf-8 -*- # # @Author: <NAME> (<EMAIL>) # @Date: 2020-06-10 #",
"\"'\" \" where catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_PMS_APOGEE_Carton(BaseCarton): \"\"\"",
"(Gaia_DR2.parallax >> None))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0],",
"peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /",
"name: mwm_yso_ob_boss old class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified Description of",
".join(YSO_Clustering, on=(Gaia_DR2.source_id == YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, YSO_Clustering.h < 13,",
"\" update sandbox.temp_mwm_yso_cluster_boss \" + \" set cadence = '\" + current_cadence +",
"on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise == AllWise.designation))",
"mwm_yso_pms_boss Comments: New, Split from PMS Simplified Description of selection criteria: Selecting the",
"== AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (AllWise.w1mpro -",
"Gaia, 2mass, allwise Additional cross-matching needed: Note: Using the Gaia xmatch somehow misses",
"#!/usr/bin/env python # -*- coding: utf-8 -*- # # @Author: <NAME> (<EMAIL>) #",
"from mwm_yso_s1 to request BOSS observations, same color selection but assigning cadence and",
"(should have ~3.2K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: mipsgal Additional",
"https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: mipsgal Additional cross-matching needed: the table has xmatch",
"tic on tic.twomass_psc = t.designation left outer join gaia_dr2_source g on g.source_id =",
"= 'bright_4x1' elif(current_rp < 15.29): current_instrument = 'BOSS' current_cadence = 'bright_5x1' elif(current_rp <",
"- AllWise.w2mpro) * 0.8 + 1.1)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog)",
"== version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (((AllWise.w2mpro - AllWise.w3mpro) > 4)",
"build_query(self, version_id, query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'),",
"gaia_dr2_source(source_id) def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass,",
"Gaia_DR2.phot_g_mean_flux_over_error, 0.75), peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error < peewee.fn.power( peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error, 0.95), peewee.fn.log( peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs)",
"(query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs",
".switch(TIC_v8) .join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m <",
"excess). Shorthand name: mwm_yso_disk_boss old class name: MWM_YSO_S1_Carton old shorthand name: mwm_yso_s1 Simplified",
".join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id",
"is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set cadence =",
"W2, W3, W4,parallax cadence options for these targets (list all options, even though",
"(list all options, even though no single target will receive more than one):",
"query_region=None): query = (AllWise .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key, TwoMassPSC.designation.alias('twomass_psc_designation'), AllWise.designation.alias('allwise_designation'), Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,",
"Zone APOGEE. Shorthand name: mwm_yso_cmz_apogee old class name: MWM_YSO_CMZ_Carton old shorthand name: mwm_yso_cmz",
"1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) / Gaia_DR2.phot_bp_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) /",
"mapper = 'MWM' priority = 2700 # mipsgal is a subset of 2MASS",
"sky: all the targets should be within 5 deg of the plane+ few",
"old shorthand name: mwm_yso_s2_5 Simplified Description of selection criteria: selection of YSOs, brighter",
"Gaia id, 2mass id, G, BP, RP, J, H, K, parallax cadence options",
"= '\" + current_cadence + \"'\" \" where catalogid = \" + str(current_catalogid)",
"in variability of var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95, and log10(var_bp)*5+11<M_BP, in which M_x is the absolute",
"sagitta | in zari18pms) & rp<15.5 lead contact:<NAME> \"\"\" # peewee Model name",
"more than one): Pseudo SQL (optional): Implementation: h_m<13 and bp_rp between -0.2 and",
"mwm_yso_disk_boss ' + 'post_process(): ' + 'instrument = None, cadence= None') if current_instrument",
"priority = 2700 # Above implementation has below clause # and (b>-5 or",
"set cadence = '\" + current_cadence + \"'\" \" where catalogid = \"",
"'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority = 2700 def build_query(self, version_id,",
"\" + \" sandbox.temp_mwm_yso_ob_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid",
"from mipsgal. brighter than H<13, have color 8.0-24>2.5, and have parallax<0.2 or lack",
"H<15, saturated (blank) W4 with W2-W3>4, or saturated W3 and W2, with J-H>1.1.",
"(parallax<0.2 or parallax is null) For CMZ, the raw sql query would be:",
"0.2) | (Gaia_DR2.parallax >> None))) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra,",
"than H<13, closer than parallax>0.3. Filter on the position of the HR diagram",
"degrees in l and 1 degree in b from the galactic center, (MIPSGAL.glon",
"Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13)) # | is",
"on=(Gaia_DR2.source_id == TIC_v8.gaia_int)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >>",
"= (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton):",
"15.5, YSO_Clustering.age < 7.5)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec,",
"(even across different version_id) # so the join with Catalog doesn't give us",
"Zari18pms) from target_selection.cartons import BaseCarton from target_selection.exceptions import TargetSelectionError # See catalog.py for",
"+ str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss",
"based on spitzer fluxes from mipsgal. brighter than H<13, have color 8.0-24>2.5, and",
"clustered structures, with age<7.5 dex and brighter than H<13 mag. (should have ~45.5K",
"with WISE colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5, closer than parallax>0.3, and brighter than H<13",
"None') if current_instrument is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \"",
"longitude) b is glat (galactic latitude) All four statements below are equivalent. (l>",
"limit for carton based on RP instead of H \"\"\" name = 'mwm_yso_ob_boss'",
"> 0.3, Gaia_DR2.bp_rp * 2.5 + 2.5 > Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000",
"peewee Model name ---> postgres table name # Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source' # Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms' # Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums'",
"== version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, YSO_Clustering.age < 7.5)) if query_region:",
"W3-W4>1.5, and relates (W3-W4)>(W1-W2)*0.5+1.1 (should have ~11.6K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source",
"= output[i][0] current_rp = output[i][1] if(current_rp < 14.76): current_instrument = 'BOSS' current_cadence =",
"than one): boss_bright_3x1 if RP<14.76 | boss_bright_4x1 if RP<15.075 | boss_bright_5x1 if RP<15.29",
"mwm_yso.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import peewee from sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL, AllWise,",
"Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_PMS_BOSS_Carton(BaseCarton): \"\"\" YSOs -",
"requiring variability in g,bp,rp>0.02 (with var_x defined as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have relations in variability",
"if RP<15.29 | boss_bright_6x1 if RP<15.5 \"\"\" cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp",
"query_region=None): # join with Sagitta query1 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key,",
"stars Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: catalogdb.sagitta, catalogdb.zari18pms Return columns: Gaia",
"is not None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set cadence =",
"(galactic longitude) b is glat (galactic latitude) All four statements below are equivalent.",
"Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query class MWM_YSO_Variable_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Variable",
"sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have relations in variability of var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95, and log10(var_bp)*5+11<M_BP, in which M_x",
"table catalogdb.yso_clustering # Foreign-key constraints: # \"yso_clustering_source_id_fkey\" FOREIGN KEY (source_id) # REFERENCES gaia_dr2_source(source_id)",
"> 0.25, (AllWise.w2mpro - AllWise.w3mpro) > 0.50, (AllWise.w3mpro - AllWise.w4mpro) > 1.50, Gaia_DR2.parallax",
"TwoMassPSC, YSO_Clustering, Zari18pms) from target_selection.cartons import BaseCarton from target_selection.exceptions import TargetSelectionError # See",
".switch(TIC_v8) .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id,",
"catalogid = \" + str(current_catalogid) + \";\") class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Central Molecular",
"or parallax is null) For CMZ, the raw sql query would be: select",
"different version_id) # so the join with Catalog doesn't give us anything extra",
"\";\") class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Embedded APOGEE (optically invisible). Shorthand name: mwm_yso_embedded_apogee old",
"self.database.execute_sql( \" update sandbox.temp_mwm_yso_ob_boss \" + \" set cadence = '\" + current_cadence",
"have ~21.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: Gaia, 2mass, allwise",
"and phot_g_mean_mag < 18.5 and h_m <13 and parallax >0.3 and bp_rp*2.5+2.5 >",
"= 'mwm_yso_cmz_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program =",
"TargetSelectionError('error in mwm_yso_cluster_boss ' + 'post_process(): ' + 'instrument = None, cadence= None')",
"the join with Catalog doesn't give us anything extra and it's a costly",
"+ \" set instrument = '\" + current_instrument + \"'\" \" where catalogid",
"sequence, APOGEE Shorthand name: mwm_yso_pms_apogee Comments: New Simplified Description of selection criteria: Selecting",
"\"\"\" name = 'mwm_yso_cluster_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1'",
"'mwm_yso_nebula_apogee' category = 'science' instrument = 'APOGEE' cadence = 'bright_3x1' program = 'mwm_yso'",
"https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass+allwise, gaia (allow sources that lack gaia xmatch)",
"https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: 2mass, gaia Additional cross-matching needed: Return columns: Gaia",
"YSO_Clustering.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5, YSO_Clustering.age < 7.5))",
"output = cursor.fetchall() for i in range(len(output)): current_catalogid = output[i][0] current_rp = output[i][1]",
"query_region[2]))) return query class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton): \"\"\"YSOs - Nebula APOGEE(optically invisible, WISE saturated). Shorthand",
"(optional): Implementation: Hmag<13 and _8_0_-_24_>2.5 and (parallax<0.2 or parallax is null) For CMZ,",
"selection of YSOs based on IR excess, with WISE colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5,",
"(pre-)Main Sequence. Shorthand name: mwm_yso_ob_apogee old class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob",
"id, G, BP, RP, J, H, K, W1, W2, W3, W4 cadence options",
"of selection criteria: selection of YSOs, brighter than H<15, saturated (blank) W4 with",
"version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m < 13, (((AllWise.w2mpro - AllWise.w3mpro) > 4) &",
"version_id) # so the join with Catalog doesn't give us anything extra and",
"than one): Pseudo SQL (optional): Implementation: h_m<13 and bp_rp between -0.2 and 1.1",
"l, b in Gaia_DR2 are gallong and gallat in TIC_v8. # We are",
"located further south of the plane if l>180 (should have ~1.2K sources) Wiki",
"BP, RP, J, H, K, parallax cadence options for these targets: boss_bright_3x1 if",
"epoch 2000.0 # (b>-5 or l>180) and b<-5 # S2_5 query below has",
"included Return columns: mipsgal id, 2mass id, j, h, k, 3.6, 4.8, 8.0,",
"cursor = self.database.execute_sql( \"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_ob_boss ;\") output",
"variability in g,bp,rp>0.02 (with var_x defined as sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error), have relations in variability of",
"> 18.5) | (Gaia_DR2.phot_g_mean_mag >> None), (AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.0, (AllWise.h_m_2mass -",
"is null and w4mpro is null and j_m-h_m>1.1) and (b>-5 or l>180) and",
"7.5)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2])))",
"sequence stars Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional source catalogs needed: catalogdb.sagitta, catalogdb.zari18pms Return columns:",
"scanning are filtered on the plane of the sky: all the targets should",
"Sequence. Shorthand name: mwm_yso_ob_boss old class name: MWM_YSO_OB_Carton old shorthand name: mwm_yso_ob Simplified",
"corresponds to # table catalogdb.gaia_dr2_source. # # All values of TIC_v8.plx (for non-null",
"- 2.2, Gaia_DR2.parallax > 0.3)) if query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra,",
"below condition. l is glon (galactic longitude) b is glat (galactic latitude) All",
"def build_query(self, version_id, query_region=None): query = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag,",
"parallax>0.3, and brighter than H<13 (should have ~21.5K sources) Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional",
"brighter than H<13, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2 (should",
"Gaia_DR2 pweewee model class corresponds to # table catalogdb.gaia_dr2_source. # # All values",
"Some contaminants from scanning are filtered on the plane of the sky: all",
"structures, with age<7.5 dex and brighter than rp<15.5 mag. Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function Additional",
"Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums' # Sagitta(CatalogdbModel)--->'catalogdb.sagitta' # TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc' name = 'mwm_yso_pms_boss' category = 'science' instrument =",
"J, H, K, W1, W2, W3, W4,parallax cadence options for these targets (list",
"= TwoMassPSC.designation. # Then join via TIC and catalog_to_tic. # # mipsgal is",
"W2-W3>4, or saturated W3 and W2, with J-H>1.1. Some contaminants from scanning are",
"RP<15.29 | boss_bright_6x1 if RP<15.5 Implementation: (in sagitta | in zari18pms) & rp<15.5",
"(l> 358 or l< 2) and b between -1 and 1 (m.glon >",
"Pseudo SQL (optional): Implementation: h_m<13 and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3",
"the previous selection \"\"\" name = 'mwm_yso_variable_boss' category = 'science' instrument = None",
"old shorthand name: mwm_yso_s3 Simplified Description of selection criteria: selection of YSOs brighter",
"current_cadence + \"'\" \" where catalogid = \" + str(current_catalogid) + \";\") class",
"or l< 2) and b between -1 and 1 (m.glon > 358 or",
"\" + \" sandbox.temp_mwm_yso_pms_boss ;\") output = cursor.fetchall() for i in range(len(output)): current_catalogid",
"= \" + str(current_catalogid) + \";\") if current_cadence is not None: self.database.execute_sql( \"",
"TwoMassPSC.designation. # Then join via TIC and catalog_to_tic. # # mipsgal is a",
"(CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), YSO_Clustering.twomass, Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag, Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'), YSO_Clustering.j, YSO_Clustering.h, YSO_Clustering.k, Gaia_DR2.parallax)",
"None raise TargetSelectionError('error in mwm_yso_pms_boss ' + 'post_process(): ' + 'instrument = None,",
">> True, Gaia_DR2.phot_rp_mean_mag < 15.5)) # join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid,",
"on=(TIC_v8.gaia_int == Gaia_DR2.source_id)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >>",
"1.6*bp_rp-2.2 and parallax>0.3 Comments: Split from mwm_yso_ob to request BOSS observations, assigning cadence",
"None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_cluster_boss \" + \" set instrument = '\" +",
"class MWM_YSO_OB_APOGEE_Carton(BaseCarton): \"\"\"YSOs - OB APOGEE Upper (pre-)Main Sequence. Shorthand name: mwm_yso_ob_apogee old",
"left outer join between TIC and Gaia (all MIPSGAL targets have a counterpart",
".join(Zari18pms, on=(Gaia_DR2.source_id == Zari18pms.source)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag < 15.5))",
"/ Gaia_DR2.parallax) - 1), Gaia_DR2.bp_rp > 1.3, peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) / Gaia_DR2.phot_g_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs)",
"| ((Gaia_DR2.b < -5) & (Gaia_DR2.l > 180)) | ((Gaia_DR2.b >> None) &",
"not get here. current_instrument = None current_cadence = None raise TargetSelectionError('error in mwm_yso_pms_boss",
"the main sequence, brighter than rp<15.5, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1,",
"\"select catalogid, gaia_dr2_rp from \" + \" sandbox.temp_mwm_yso_ob_boss ;\") output = cursor.fetchall() for",
"= None current_cadence = None raise TargetSelectionError('error in mwm_yso_pms_boss ' + 'post_process(): '",
"sequence, brighter than rp<15.5, G<18 mag, closer than parallax>0.3, color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2",
"filtered on the plane of the sky: all the targets should be within",
"than H<13, fainter than G>15 or without gaia detection, colors J-H>0,5, W1-W2>0.5, W2-W3>1,",
"optical variables). Shorthand name: mwm_yso_variable_boss old class name: MWM_YSO_S3_Carton old shorthand name: mwm_yso_s3",
"need a between to Catalog and CatalogToTIC_v8 # Catalog.catalogid == CatalogToTIC_v8.catalogid # We",
"bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02 and sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02 \"\"\" name = 'mwm_yso_variable_apogee' category =",
"None: self.database.execute_sql( \" update sandbox.temp_mwm_yso_disk_boss \" + \" set cadence = '\" +",
"TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, TwoMassPSC.h_m",
"TwoMassPSC.designation)) .join(Gaia_DR2, peewee.JOIN.LEFT_OUTER, on=(Gaia_DR2.source_id == TIC_v8.gaia_int)) .switch(TIC_v8) .join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id)) .where(CatalogToTIC_v8.version_id ==",
"TwoMassPSC.designation)) .switch(Gaia_DR2) .join(Sagitta, on=(Gaia_DR2.source_id == Sagitta.source_id)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >> True, Gaia_DR2.phot_rp_mean_mag",
"query def post_process(self, model): \"\"\" cadence options for these targets: boss_bright_3x1 if RP<14.76",
"= 'bright_3x1' program = 'mwm_yso' mapper = 'MWM' priority = 2700 # Above",
"no single target will receive more than one): Pseudo SQL (optional): Implementation: h_m<13",
"- 1), Gaia_DR2.bp_rp * 2.5 - 1 < Gaia_DR2.phot_g_mean_mag - 5 * (peewee.fn.log(1000",
"columns: Gaia id, 2mass id, allwise id, G, BP, RP, J, H, K,",
"query_region: query = (query .join_from(CatalogToTIC_v8, Catalog) .where(peewee.fn.q3c_radial_query(Catalog.ra, Catalog.dec, query_region[0], query_region[1], query_region[2]))) return query",
"shorthand name: mwm_yso_cmz Simplified Description of selection criteria: selection of sources in the",
"query_region[1], query_region[2]))) return query def post_process(self, model): \"\"\" cadence options for these targets:",
"None raise TargetSelectionError('error in mwm_yso_cluster_boss ' + 'post_process(): ' + 'instrument = None,",
"and w1mpro-w2mpro>0.25 and w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3 Comments: Split from mwm_yso_s1 to",
"13)) # join with Zari18pms query2 = (CatalogToTIC_v8 .select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id, Gaia_DR2.ra.alias('gaia_dr2_ra'), Gaia_DR2.dec.alias('gaia_dr2_dec'), TwoMassPSC.pts_key,",
"the position on the sky: Removed below condition. l is glon (galactic longitude)",
"Gaia_DR2.phot_bp_mean_flux_over_error > 0.02, peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) / Gaia_DR2.phot_rp_mean_flux_over_error > 0.02)) if query_region: query = (query",
"on=(TIC_v8.twomass_psc == TwoMassPSC.designation)) .switch(TIC_v8) .join(AllWise, on=(TIC_v8.allwise == AllWise.designation)) .where(CatalogToTIC_v8.version_id == version_id, CatalogToTIC_v8.best >>",
"2mass, allwise Additional cross-matching needed: Return columns: 2mass id, allwise id, J, H,",
"catalog_to_tic. # # mipsgal is a subset of 2MASS # 2MASS is a",
"((Gaia_DR2.b < -5) & (Gaia_DR2.l > 180)) | ((Gaia_DR2.b >> None) & (Gaia_DR2.l",
"Implementation: h_m<13 and (w2mpro-w3mpro>4 and w4mpro is null) or (w3mpro is null and"
] |
[
"coding: utf-8 -*- \"\"\" @author: children1987 \"\"\" from utils import get_config_info, replace_in_file def",
"\"'USER': 'root'\") config_info = get_config_info() password = config_info['mysql']['root_password'] replace_in_file( cfg_file, \"'PASSWORD': '<PASSWORD>'\", \"'PASSWORD':",
"\"\"\" @author: children1987 \"\"\" from utils import get_config_info, replace_in_file def main(): cfg_file =",
"password = config_info['mysql']['root_password'] replace_in_file( cfg_file, \"'PASSWORD': '<PASSWORD>'\", \"'PASSWORD': '{}'\".format(password) ) # 使前端项目在dev模式下可被外部访问 f",
"'root'\") config_info = get_config_info() password = config_info['mysql']['root_password'] replace_in_file( cfg_file, \"'PASSWORD': '<PASSWORD>'\", \"'PASSWORD': '{}'\".format(password)",
"= config_info['mysql']['root_password'] replace_in_file( cfg_file, \"'PASSWORD': '<PASSWORD>'\", \"'PASSWORD': '{}'\".format(password) ) # 使前端项目在dev模式下可被外部访问 f =",
"使前端项目在dev模式下可被外部访问 f = '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, \"host: 'localhost'\", \"host: '0.0.0.0'\") f = '/opt/shutongFlow/fronted/src/main.js' ip",
"\"'PASSWORD': '<PASSWORD>'\", \"'PASSWORD': '{}'\".format(password) ) # 使前端项目在dev模式下可被外部访问 f = '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, \"host: 'localhost'\",",
"config_info = get_config_info() password = config_info['mysql']['root_password'] replace_in_file( cfg_file, \"'PASSWORD': '<PASSWORD>'\", \"'PASSWORD': '{}'\".format(password) )",
"replace_in_file( f, \"axios.defaults.baseURL = 'http://127.0.0.1:6062/'\", \"axios.defaults.baseURL = 'http://{}:6062/'\".format(ip) ) if __name__ == '__main__':",
"-*- coding: utf-8 -*- \"\"\" @author: children1987 \"\"\" from utils import get_config_info, replace_in_file",
") # 使前端项目在dev模式下可被外部访问 f = '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, \"host: 'localhost'\", \"host: '0.0.0.0'\") f =",
"\"'PASSWORD': '{}'\".format(password) ) # 使前端项目在dev模式下可被外部访问 f = '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, \"host: 'localhost'\", \"host: '0.0.0.0'\")",
"from utils import get_config_info, replace_in_file def main(): cfg_file = '/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置 replace_in_file(cfg_file,",
"= config_info['ip'] replace_in_file( f, \"axios.defaults.baseURL = 'http://127.0.0.1:6062/'\", \"axios.defaults.baseURL = 'http://{}:6062/'\".format(ip) ) if __name__",
"'{}'\".format(password) ) # 使前端项目在dev模式下可被外部访问 f = '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, \"host: 'localhost'\", \"host: '0.0.0.0'\") f",
"children1987 \"\"\" from utils import get_config_info, replace_in_file def main(): cfg_file = '/opt/shutongFlow/apps/apps/settings.py' #",
"\"host: '0.0.0.0'\") f = '/opt/shutongFlow/fronted/src/main.js' ip = config_info['ip'] replace_in_file( f, \"axios.defaults.baseURL = 'http://127.0.0.1:6062/'\",",
"replace_in_file def main(): cfg_file = '/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置 replace_in_file(cfg_file, \"'USER': 'shutongflow'\", \"'USER': 'root'\")",
"get_config_info() password = config_info['mysql']['root_password'] replace_in_file( cfg_file, \"'PASSWORD': '<PASSWORD>'\", \"'PASSWORD': '{}'\".format(password) ) # 使前端项目在dev模式下可被外部访问",
"@author: children1987 \"\"\" from utils import get_config_info, replace_in_file def main(): cfg_file = '/opt/shutongFlow/apps/apps/settings.py'",
"replace_in_file(f, \"host: 'localhost'\", \"host: '0.0.0.0'\") f = '/opt/shutongFlow/fronted/src/main.js' ip = config_info['ip'] replace_in_file( f,",
"replace_in_file(cfg_file, \"'USER': 'shutongflow'\", \"'USER': 'root'\") config_info = get_config_info() password = config_info['mysql']['root_password'] replace_in_file( cfg_file,",
"'localhost'\", \"host: '0.0.0.0'\") f = '/opt/shutongFlow/fronted/src/main.js' ip = config_info['ip'] replace_in_file( f, \"axios.defaults.baseURL =",
"utf-8 -*- \"\"\" @author: children1987 \"\"\" from utils import get_config_info, replace_in_file def main():",
"\"\"\" from utils import get_config_info, replace_in_file def main(): cfg_file = '/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置",
"'/opt/shutongFlow/fronted/src/main.js' ip = config_info['ip'] replace_in_file( f, \"axios.defaults.baseURL = 'http://127.0.0.1:6062/'\", \"axios.defaults.baseURL = 'http://{}:6062/'\".format(ip) )",
"'/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置 replace_in_file(cfg_file, \"'USER': 'shutongflow'\", \"'USER': 'root'\") config_info = get_config_info() password =",
"\"'USER': 'shutongflow'\", \"'USER': 'root'\") config_info = get_config_info() password = config_info['mysql']['root_password'] replace_in_file( cfg_file, \"'PASSWORD':",
"get_config_info, replace_in_file def main(): cfg_file = '/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置 replace_in_file(cfg_file, \"'USER': 'shutongflow'\", \"'USER':",
"= '/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置 replace_in_file(cfg_file, \"'USER': 'shutongflow'\", \"'USER': 'root'\") config_info = get_config_info() password",
"utils import get_config_info, replace_in_file def main(): cfg_file = '/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置 replace_in_file(cfg_file, \"'USER':",
"修改数据库配置 replace_in_file(cfg_file, \"'USER': 'shutongflow'\", \"'USER': 'root'\") config_info = get_config_info() password = config_info['mysql']['root_password'] replace_in_file(",
"f = '/opt/shutongFlow/fronted/src/main.js' ip = config_info['ip'] replace_in_file( f, \"axios.defaults.baseURL = 'http://127.0.0.1:6062/'\", \"axios.defaults.baseURL =",
"<gh_stars>1-10 # -*- coding: utf-8 -*- \"\"\" @author: children1987 \"\"\" from utils import",
"= '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, \"host: 'localhost'\", \"host: '0.0.0.0'\") f = '/opt/shutongFlow/fronted/src/main.js' ip = config_info['ip']",
"f = '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, \"host: 'localhost'\", \"host: '0.0.0.0'\") f = '/opt/shutongFlow/fronted/src/main.js' ip =",
"= '/opt/shutongFlow/fronted/src/main.js' ip = config_info['ip'] replace_in_file( f, \"axios.defaults.baseURL = 'http://127.0.0.1:6062/'\", \"axios.defaults.baseURL = 'http://{}:6062/'\".format(ip)",
"import get_config_info, replace_in_file def main(): cfg_file = '/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置 replace_in_file(cfg_file, \"'USER': 'shutongflow'\",",
"'0.0.0.0'\") f = '/opt/shutongFlow/fronted/src/main.js' ip = config_info['ip'] replace_in_file( f, \"axios.defaults.baseURL = 'http://127.0.0.1:6062/'\", \"axios.defaults.baseURL",
"def main(): cfg_file = '/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置 replace_in_file(cfg_file, \"'USER': 'shutongflow'\", \"'USER': 'root'\") config_info",
"'<PASSWORD>'\", \"'PASSWORD': '{}'\".format(password) ) # 使前端项目在dev模式下可被外部访问 f = '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, \"host: 'localhost'\", \"host:",
"ip = config_info['ip'] replace_in_file( f, \"axios.defaults.baseURL = 'http://127.0.0.1:6062/'\", \"axios.defaults.baseURL = 'http://{}:6062/'\".format(ip) ) if",
"f, \"axios.defaults.baseURL = 'http://127.0.0.1:6062/'\", \"axios.defaults.baseURL = 'http://{}:6062/'\".format(ip) ) if __name__ == '__main__': main()",
"'shutongflow'\", \"'USER': 'root'\") config_info = get_config_info() password = config_info['mysql']['root_password'] replace_in_file( cfg_file, \"'PASSWORD': '<PASSWORD>'\",",
"= get_config_info() password = config_info['mysql']['root_password'] replace_in_file( cfg_file, \"'PASSWORD': '<PASSWORD>'\", \"'PASSWORD': '{}'\".format(password) ) #",
"# 使前端项目在dev模式下可被外部访问 f = '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, \"host: 'localhost'\", \"host: '0.0.0.0'\") f = '/opt/shutongFlow/fronted/src/main.js'",
"# 修改数据库配置 replace_in_file(cfg_file, \"'USER': 'shutongflow'\", \"'USER': 'root'\") config_info = get_config_info() password = config_info['mysql']['root_password']",
"'/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, \"host: 'localhost'\", \"host: '0.0.0.0'\") f = '/opt/shutongFlow/fronted/src/main.js' ip = config_info['ip'] replace_in_file(",
"replace_in_file( cfg_file, \"'PASSWORD': '<PASSWORD>'\", \"'PASSWORD': '{}'\".format(password) ) # 使前端项目在dev模式下可被外部访问 f = '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f,",
"cfg_file, \"'PASSWORD': '<PASSWORD>'\", \"'PASSWORD': '{}'\".format(password) ) # 使前端项目在dev模式下可被外部访问 f = '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, \"host:",
"# -*- coding: utf-8 -*- \"\"\" @author: children1987 \"\"\" from utils import get_config_info,",
"cfg_file = '/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置 replace_in_file(cfg_file, \"'USER': 'shutongflow'\", \"'USER': 'root'\") config_info = get_config_info()",
"config_info['mysql']['root_password'] replace_in_file( cfg_file, \"'PASSWORD': '<PASSWORD>'\", \"'PASSWORD': '{}'\".format(password) ) # 使前端项目在dev模式下可被外部访问 f = '/opt/shutongFlow/fronted/config/index.js'",
"-*- \"\"\" @author: children1987 \"\"\" from utils import get_config_info, replace_in_file def main(): cfg_file",
"config_info['ip'] replace_in_file( f, \"axios.defaults.baseURL = 'http://127.0.0.1:6062/'\", \"axios.defaults.baseURL = 'http://{}:6062/'\".format(ip) ) if __name__ ==",
"main(): cfg_file = '/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置 replace_in_file(cfg_file, \"'USER': 'shutongflow'\", \"'USER': 'root'\") config_info =",
"\"host: 'localhost'\", \"host: '0.0.0.0'\") f = '/opt/shutongFlow/fronted/src/main.js' ip = config_info['ip'] replace_in_file( f, \"axios.defaults.baseURL"
] |
[
"PRIMER_LEFT_TM + '\\t' + PRIMER_RIGHT_TM + '\\t' + PRIMER_LEFT_GC_PERCENT + '\\t' + PRIMER_RIGHT_GC_PERCENT",
"str(primer_index) + \"_FWD_REV\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" +",
"to amp size (smallest is best) and then print to all_amplicon all_primers_sorted =",
"\"_primer_\" + str(primer_index) + \"_REV_REV\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual +",
"primer file (for filtering), first put in dict, will be sorted (see below)",
"+ chrom + \"\\t\" + start + \"\\t\" + end + '\\t' +",
"range(int(nr_p_out)): FWD = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_SEQUENCE\")] FWD_qual = len(FWD) * \"I\"",
"'\\t' + REV_pos +'\\t' + REV_len + '\\t' + PRIMER_LEFT_TM + '\\t' +",
"for x in circ_info_keys): general_info.write(info + '=' + str(primers[info]) +'\\n') general_info.close() # make",
"input_primers = args.i[0] primer_in = open(input_primers) primers = {} circ_info_keys = (\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\",",
"info into dictionary for line in primer_in: key, value = line.split(\"=\") value =",
"= args.i[0] primer_in = open(input_primers) primers = {} circ_info_keys = (\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\")",
"primers[\"SEQUENCE_TEMPLATE\"] circRNA = primers[\"SEQUENCE_ID\"] circ_ID, chrom, start, end = circRNA.split(\"_\") nr_p_out = primers[\"PRIMER_LEFT_NUM_RETURNED\"]",
"in primers: if \"_NUM_\" in info or \"_EXPLAIN\" in info or any(x in",
"first put in dict, will be sorted (see below) all_primers_dict[circ_ID + \"\\t\" +",
"= primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_SEQUENCE\")] FWD_qual = len(FWD) * \"I\" REV =",
"REV = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_SEQUENCE\")] REV_qual = len(REV) * \"I\" PRIMER_LEFT_TM",
"= primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_TM\")] PRIMER_RIGHT_TM = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_TM\")]",
"str(primer_index)].split(\",\") REV_pos, REV_len = primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\") amplicon = template[int(FWD_pos):int(REV_pos) + 1] all_amplicon.write(\"> amplicon_\"",
"# bowtie input file # write FWD + REV primer_file.write(circ_ID + \"_primer_\" +",
"+ \"_GC_PERCENT\")] # bowtie input file # write FWD + REV primer_file.write(circ_ID +",
"\"\\n\") # write REV + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_REV\"",
"value = value.rstrip() primers[key] = value template = primers[\"SEQUENCE_TEMPLATE\"] circRNA = primers[\"SEQUENCE_ID\"] circ_ID,",
"+ \"\\n\") # write REV + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) +",
"dictionary for line in primer_in: key, value = line.split(\"=\") value = value.rstrip() primers[key]",
"\"I\" PRIMER_LEFT_TM = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_TM\")] PRIMER_RIGHT_TM = primers[(\"PRIMER_RIGHT_\" + str(primer_index)",
"file with list primers all_primers = open(\"all_primers_\" + circ_ID + \".txt\", 'w') all_amplicon",
"\"_TM\")] PRIMER_LEFT_GC_PERCENT = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT = primers[(\"PRIMER_RIGHT_\" + str(primer_index)",
"+ \"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_GC_PERCENT\")] # bowtie input file",
"= primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_GC_PERCENT\")] # bowtie input file # write FWD",
"= primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\") amplicon = template[int(FWD_pos):int(REV_pos) + 1] all_amplicon.write(\"> amplicon_\" + circ_ID +",
"\"\\t\" + FWD_qual + \"\\t\" + REV + \"\\t\" + REV_qual + \"\\n\")",
"circRNA.split(\"_\") nr_p_out = primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close() # read general info into file general_info =",
"+ \".txt\", \"a\") for info in primers: if \"_NUM_\" in info or \"_EXPLAIN\"",
"\"a\") for info in primers: if \"_NUM_\" in info or \"_EXPLAIN\" in info",
"info or any(x in info for x in circ_info_keys): general_info.write(info + '=' +",
"\"_REV_REV\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" + REV +",
"# read general info into file general_info = open(\"general_primer_design_info_\" + circ_ID + \".txt\",",
"circ_ID + \".txt\", 'w') all_primers_dict = {} for primer_index in range(int(nr_p_out)): FWD =",
"write FWD + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_FWD\" + \"\\t\")",
"+ REV_qual + \"\\t\" + REV + \"\\t\" + REV_qual + \"\\n\") #",
"+ \"_primer_\" + str(primer_index) + \"_REV_FWD\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual",
"make general file with list primers all_primers = open(\"all_primers_\" + circ_ID + \".txt\",",
"open(\"all_primers_\" + circ_ID + \".txt\", 'w') all_amplicon = open(\"amplicon_folding_input_\" + circ_ID + \".txt\",",
"str(primer_index) + \"_REV_FWD\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" +",
"+ '\\t' + PRIMER_LEFT_TM + '\\t' + PRIMER_RIGHT_TM + '\\t' + PRIMER_LEFT_GC_PERCENT +",
"+ str(primer_index) + \"_SEQUENCE\")] FWD_qual = len(FWD) * \"I\" REV = primers[(\"PRIMER_RIGHT_\" +",
"+ \".txt\", \"a\") # make general file with list primers all_primers = open(\"all_primers_\"",
"amplicon + \"\\n\") # general primer file (for filtering), first put in dict,",
"# read all info into dictionary for line in primer_in: key, value =",
"primer_index in range(int(nr_p_out)): FWD = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_SEQUENCE\")] FWD_qual = len(FWD)",
"\"\\n\") # get amplicon and make file for NUPACK FWD_pos, FWD_len = primers['PRIMER_LEFT_'+",
"or \"_EXPLAIN\" in info or any(x in info for x in circ_info_keys): general_info.write(info",
"circ_ID + \".txt\", \"a\") # make general file with list primers all_primers =",
"+ \"_primer\" + str(primer_index) + \"_\" + amplicon + \"\\n\") # general primer",
"REV_len + '\\t' + PRIMER_LEFT_TM + '\\t' + PRIMER_RIGHT_TM + '\\t' + PRIMER_LEFT_GC_PERCENT",
"PRIMER_RIGHT_TM + '\\t' + PRIMER_LEFT_GC_PERCENT + '\\t' + PRIMER_RIGHT_GC_PERCENT + '\\t' + amplicon",
"+ \"\\t\" + FWD + \"\\t\" + FWD_qual + \"\\n\") # write REV",
"\"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" + REV + \"\\t\" +",
"# sort primers according to amp size (smallest is best) and then print",
"\"\\t\" + FWD_qual + \"\\n\") # write REV + REV primer_file.write(circ_ID + \"_primer_\"",
"FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_FWD\" + \"\\t\") primer_file.write(REV + \"\\t\"",
"make file for NUPACK FWD_pos, FWD_len = primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\") REV_pos, REV_len = primers['PRIMER_RIGHT_'+",
"file for NUPACK FWD_pos, FWD_len = primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\") REV_pos, REV_len = primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\")",
"* \"I\" PRIMER_LEFT_TM = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_TM\")] PRIMER_RIGHT_TM = primers[(\"PRIMER_RIGHT_\" +",
"circ_ID, chrom, start, end = circRNA.split(\"_\") nr_p_out = primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close() # read general",
"\"\\t\" + FWD + \"\\t\" + FWD_qual + \"\\n\") # write REV +",
"str(primers[info]) +'\\n') general_info.close() # make file for bowtie primer_file = open(\"primer_spec_input_\" + circ_ID",
"\".txt\", \"a\") # make general file with list primers all_primers = open(\"all_primers_\" +",
"# get amplicon and make file for NUPACK FWD_pos, FWD_len = primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\")",
"REV + \"\\t\" + REV_qual + \"\\n\") # get amplicon and make file",
"FWD_pos, FWD_len = primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\") REV_pos, REV_len = primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\") amplicon = template[int(FWD_pos):int(REV_pos)",
"+ str(primer_index) + \"_FWD_FWD\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\"",
"+ \"\\t\" + FWD_qual + \"\\n\") # write REV + REV primer_file.write(circ_ID +",
"= {k: v for k, v in sorted(all_primers_dict.items(), key=lambda item: item[1])} for primer",
"= primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_TM\")] PRIMER_LEFT_GC_PERCENT = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_GC_PERCENT\")]",
"key, value = line.split(\"=\") value = value.rstrip() primers[key] = value template = primers[\"SEQUENCE_TEMPLATE\"]",
"+ \"_REV_REV\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" + REV",
"(see below) all_primers_dict[circ_ID + \"\\t\" + chrom + \"\\t\" + start + \"\\t\"",
"\"\\n\") # general primer file (for filtering), first put in dict, will be",
"(smallest is best) and then print to all_amplicon all_primers_sorted = {k: v for",
"end = circRNA.split(\"_\") nr_p_out = primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close() # read general info into file",
"\"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" + FWD + \"\\t\" +",
"\"\\t\" + REV_qual + \"\\n\") # get amplicon and make file for NUPACK",
"FWD_len = primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\") REV_pos, REV_len = primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\") amplicon = template[int(FWD_pos):int(REV_pos) +",
"FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_FWD\" + \"\\t\") primer_file.write(FWD + \"\\t\"",
"all_primers_dict = {} for primer_index in range(int(nr_p_out)): FWD = primers[(\"PRIMER_LEFT_\" + str(primer_index) +",
"in primer_in: key, value = line.split(\"=\") value = value.rstrip() primers[key] = value template",
"read all info into dictionary for line in primer_in: key, value = line.split(\"=\")",
"+ str(primer_index) + \"_SEQUENCE\")] REV_qual = len(REV) * \"I\" PRIMER_LEFT_TM = primers[(\"PRIMER_LEFT_\" +",
"+ \"_TM\")] PRIMER_RIGHT_TM = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_TM\")] PRIMER_LEFT_GC_PERCENT = primers[(\"PRIMER_LEFT_\" +",
"+ circ_ID + \".txt\", 'w') all_amplicon = open(\"amplicon_folding_input_\" + circ_ID + \".txt\", 'w')",
"+ \"_FWD_REV\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" + REV",
"+ \"\\n\") # write REV + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) +",
"'\\t' + PRIMER_RIGHT_GC_PERCENT + '\\t' + amplicon + '\\n'] = len(amplicon) # sort",
"'\\t' + FWD_len + '\\t' + REV_pos +'\\t' + REV_len + '\\t' +",
"FWD_pos + '\\t' + FWD_len + '\\t' + REV_pos +'\\t' + REV_len +",
"+ str(primer_index) + \"_\" + amplicon + \"\\n\") # general primer file (for",
"circ_ID + \"_primer\" + str(primer_index) + \"_\" + amplicon + \"\\n\") # general",
"in sorted(all_primers_dict.items(), key=lambda item: item[1])} for primer in all_primers_sorted: all_primers.write(primer) primer_file.close() all_primers.close() all_amplicon.close()",
"info or \"_EXPLAIN\" in info or any(x in info for x in circ_info_keys):",
"start, end = circRNA.split(\"_\") nr_p_out = primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close() # read general info into",
"primers: if \"_NUM_\" in info or \"_EXPLAIN\" in info or any(x in info",
"in range(int(nr_p_out)): FWD = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_SEQUENCE\")] FWD_qual = len(FWD) *",
"\"_primer_\" + str(primer_index) + \"_FWD_REV\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual +",
"value template = primers[\"SEQUENCE_TEMPLATE\"] circRNA = primers[\"SEQUENCE_ID\"] circ_ID, chrom, start, end = circRNA.split(\"_\")",
"value.rstrip() primers[key] = value template = primers[\"SEQUENCE_TEMPLATE\"] circRNA = primers[\"SEQUENCE_ID\"] circ_ID, chrom, start,",
"bowtie primer_file = open(\"primer_spec_input_\" + circ_ID + \".txt\", \"a\") # make general file",
"+ FWD_pos + '\\t' + FWD_len + '\\t' + REV_pos +'\\t' + REV_len",
"'\\t' + FWD_pos + '\\t' + FWD_len + '\\t' + REV_pos +'\\t' +",
"primer file') args = parser.parse_args() input_primers = args.i[0] primer_in = open(input_primers) primers =",
"\"_REV_FWD\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" + FWD +",
"all_primers_dict[circ_ID + \"\\t\" + chrom + \"\\t\" + start + \"\\t\" + end",
"+ REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_REV\" + \"\\t\") primer_file.write(REV +",
"+ \"\\t\" + start + \"\\t\" + end + '\\t' + str(primer_index) +",
"parser.add_argument('-i', nargs=1, required=True, help='input primer file') args = parser.parse_args() input_primers = args.i[0] primer_in",
"\"\\t\" + REV + \"\\t\" + REV_qual + \"\\n\") # write REV +",
"to all_amplicon all_primers_sorted = {k: v for k, v in sorted(all_primers_dict.items(), key=lambda item:",
"= {} circ_info_keys = (\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\") # read all info into dictionary",
"primer_in.close() # read general info into file general_info = open(\"general_primer_design_info_\" + circ_ID +",
"{k: v for k, v in sorted(all_primers_dict.items(), key=lambda item: item[1])} for primer in",
"+ \"_primer_\" + str(primer_index) + \"_FWD_FWD\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual",
"FWD = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_SEQUENCE\")] FWD_qual = len(FWD) * \"I\" REV",
"+ \"\\t\" + FWD_qual + \"\\n\") # write FWD + FWD primer_file.write(circ_ID +",
"chrom, start, end = circRNA.split(\"_\") nr_p_out = primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close() # read general info",
"\".txt\", 'w') all_primers_dict = {} for primer_index in range(int(nr_p_out)): FWD = primers[(\"PRIMER_LEFT_\" +",
"+ \"_FWD_FWD\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" + FWD",
"nr_p_out = primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close() # read general info into file general_info = open(\"general_primer_design_info_\"",
"+ '\\t' + PRIMER_RIGHT_GC_PERCENT + '\\t' + amplicon + '\\n'] = len(amplicon) #",
"+ end + '\\t' + str(primer_index) + '\\t' + FWD + '\\t' +",
"filtering), first put in dict, will be sorted (see below) all_primers_dict[circ_ID + \"\\t\"",
"+ str(primer_index) + \"_GC_PERCENT\")] # bowtie input file # write FWD + REV",
"in info for x in circ_info_keys): general_info.write(info + '=' + str(primers[info]) +'\\n') general_info.close()",
"NUPACK FWD_pos, FWD_len = primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\") REV_pos, REV_len = primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\") amplicon =",
"into dictionary for line in primer_in: key, value = line.split(\"=\") value = value.rstrip()",
"REV_qual + \"\\n\") # get amplicon and make file for NUPACK FWD_pos, FWD_len",
"+ '\\t' + PRIMER_RIGHT_TM + '\\t' + PRIMER_LEFT_GC_PERCENT + '\\t' + PRIMER_RIGHT_GC_PERCENT +",
"and make file for NUPACK FWD_pos, FWD_len = primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\") REV_pos, REV_len =",
"REV + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_REV\" + \"\\t\") primer_file.write(REV",
"for bowtie primer_file = open(\"primer_spec_input_\" + circ_ID + \".txt\", \"a\") # make general",
"+ FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_FWD\" + \"\\t\") primer_file.write(REV +",
"open(input_primers) primers = {} circ_info_keys = (\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\") # read all info",
"then print to all_amplicon all_primers_sorted = {k: v for k, v in sorted(all_primers_dict.items(),",
"str(primer_index) + \"_FWD_FWD\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" +",
"+ str(primer_index) + \"_REV_REV\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\"",
"primers[key] = value template = primers[\"SEQUENCE_TEMPLATE\"] circRNA = primers[\"SEQUENCE_ID\"] circ_ID, chrom, start, end",
"FWD + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_REV\" + \"\\t\") primer_file.write(FWD",
"args = parser.parse_args() input_primers = args.i[0] primer_in = open(input_primers) primers = {} circ_info_keys",
"str(primer_index) + \"_\" + amplicon + \"\\n\") # general primer file (for filtering),",
"all_amplicon = open(\"amplicon_folding_input_\" + circ_ID + \".txt\", 'w') all_primers_dict = {} for primer_index",
"\"_primer\" + str(primer_index) + \"_\" + amplicon + \"\\n\") # general primer file",
"= value template = primers[\"SEQUENCE_TEMPLATE\"] circRNA = primers[\"SEQUENCE_ID\"] circ_ID, chrom, start, end =",
"+ REV + '\\t' + FWD_pos + '\\t' + FWD_len + '\\t' +",
"line in primer_in: key, value = line.split(\"=\") value = value.rstrip() primers[key] = value",
"\"\\n\") # write REV + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_FWD\"",
"\"a\") # make general file with list primers all_primers = open(\"all_primers_\" + circ_ID",
"template[int(FWD_pos):int(REV_pos) + 1] all_amplicon.write(\"> amplicon_\" + circ_ID + \"_primer\" + str(primer_index) + \"_\"",
"primer_xc script') parser.add_argument('-i', nargs=1, required=True, help='input primer file') args = parser.parse_args() input_primers =",
"+ str(primer_index) + \"_REV_FWD\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\"",
"\"_SEQUENCE\")] REV_qual = len(REV) * \"I\" PRIMER_LEFT_TM = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_TM\")]",
"+ str(primer_index) + \"_FWD_REV\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\"",
"make file for bowtie primer_file = open(\"primer_spec_input_\" + circ_ID + \".txt\", \"a\") #",
"REV_qual + \"\\t\" + FWD + \"\\t\" + FWD_qual + \"\\n\") # write",
"+ FWD_qual + \"\\t\" + FWD + \"\\t\" + FWD_qual + \"\\n\") #",
"'w') all_primers_dict = {} for primer_index in range(int(nr_p_out)): FWD = primers[(\"PRIMER_LEFT_\" + str(primer_index)",
"x in circ_info_keys): general_info.write(info + '=' + str(primers[info]) +'\\n') general_info.close() # make file",
"+ REV + \"\\t\" + REV_qual + \"\\n\") # write REV + FWD",
"+ FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_FWD\" + \"\\t\") primer_file.write(FWD +",
"file (for filtering), first put in dict, will be sorted (see below) all_primers_dict[circ_ID",
"#!/usr/bin/python3 import argparse parser = argparse.ArgumentParser(description='give arguments to main primer_xc script') parser.add_argument('-i', nargs=1,",
"str(primer_index) + \"_SEQUENCE\")] REV_qual = len(REV) * \"I\" PRIMER_LEFT_TM = primers[(\"PRIMER_LEFT_\" + str(primer_index)",
"= primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\") REV_pos, REV_len = primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\") amplicon = template[int(FWD_pos):int(REV_pos) + 1]",
"primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\") amplicon = template[int(FWD_pos):int(REV_pos) + 1] all_amplicon.write(\"> amplicon_\" + circ_ID + \"_primer\"",
"\"_primer_\" + str(primer_index) + \"_REV_FWD\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual +",
"+ REV_len + '\\t' + PRIMER_LEFT_TM + '\\t' + PRIMER_RIGHT_TM + '\\t' +",
"= parser.parse_args() input_primers = args.i[0] primer_in = open(input_primers) primers = {} circ_info_keys =",
"+ REV_qual + \"\\n\") # write REV + FWD primer_file.write(circ_ID + \"_primer_\" +",
"+ \"_REV_FWD\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" + FWD",
"circ_ID + \".txt\", \"a\") for info in primers: if \"_NUM_\" in info or",
"all_amplicon.write(\"> amplicon_\" + circ_ID + \"_primer\" + str(primer_index) + \"_\" + amplicon +",
"file') args = parser.parse_args() input_primers = args.i[0] primer_in = open(input_primers) primers = {}",
"+ amplicon + '\\n'] = len(amplicon) # sort primers according to amp size",
"+ '\\n'] = len(amplicon) # sort primers according to amp size (smallest is",
"(\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\") # read all info into dictionary for line in primer_in:",
"in info or any(x in info for x in circ_info_keys): general_info.write(info + '='",
"= open(\"amplicon_folding_input_\" + circ_ID + \".txt\", 'w') all_primers_dict = {} for primer_index in",
"general primer file (for filtering), first put in dict, will be sorted (see",
"+ '\\t' + REV_pos +'\\t' + REV_len + '\\t' + PRIMER_LEFT_TM + '\\t'",
"+ REV_qual + \"\\n\") # get amplicon and make file for NUPACK FWD_pos,",
"+ \".txt\", 'w') all_amplicon = open(\"amplicon_folding_input_\" + circ_ID + \".txt\", 'w') all_primers_dict =",
"'w') all_amplicon = open(\"amplicon_folding_input_\" + circ_ID + \".txt\", 'w') all_primers_dict = {} for",
"in dict, will be sorted (see below) all_primers_dict[circ_ID + \"\\t\" + chrom +",
"help='input primer file') args = parser.parse_args() input_primers = args.i[0] primer_in = open(input_primers) primers",
"primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_FWD\" + \"\\t\") primer_file.write(REV + \"\\t\" +",
"for info in primers: if \"_NUM_\" in info or \"_EXPLAIN\" in info or",
"any(x in info for x in circ_info_keys): general_info.write(info + '=' + str(primers[info]) +'\\n')",
"+ str(primer_index) + \"_TM\")] PRIMER_LEFT_GC_PERCENT = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT =",
"FWD_qual + \"\\t\" + FWD + \"\\t\" + FWD_qual + \"\\n\") # write",
"+ \"\\t\" + end + '\\t' + str(primer_index) + '\\t' + FWD +",
"'\\t' + PRIMER_LEFT_GC_PERCENT + '\\t' + PRIMER_RIGHT_GC_PERCENT + '\\t' + amplicon + '\\n']",
"below) all_primers_dict[circ_ID + \"\\t\" + chrom + \"\\t\" + start + \"\\t\" +",
"nargs=1, required=True, help='input primer file') args = parser.parse_args() input_primers = args.i[0] primer_in =",
"chrom + \"\\t\" + start + \"\\t\" + end + '\\t' + str(primer_index)",
"\"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" + REV + \"\\t\" +",
"+ str(primer_index) + \"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_GC_PERCENT\")] # bowtie",
"get amplicon and make file for NUPACK FWD_pos, FWD_len = primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\") REV_pos,",
"+ FWD_len + '\\t' + REV_pos +'\\t' + REV_len + '\\t' + PRIMER_LEFT_TM",
"primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_TM\")] PRIMER_LEFT_GC_PERCENT = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT",
"for NUPACK FWD_pos, FWD_len = primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\") REV_pos, REV_len = primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\") amplicon",
"PRIMER_LEFT_GC_PERCENT = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT = primers[(\"PRIMER_RIGHT_\" + str(primer_index) +",
"\"_FWD_FWD\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" + FWD +",
"write FWD + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_REV\" + \"\\t\")",
"+ \"\\n\") # write FWD + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) +",
"+ FWD + \"\\t\" + FWD_qual + \"\\n\") # write FWD + FWD",
"+ \"\\t\" + chrom + \"\\t\" + start + \"\\t\" + end +",
"\"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\") # read all info into dictionary for line in primer_in: key,",
"= open(input_primers) primers = {} circ_info_keys = (\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\") # read all",
"str(primer_index) + \"_GC_PERCENT\")] # bowtie input file # write FWD + REV primer_file.write(circ_ID",
"write REV + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_FWD\" + \"\\t\")",
"+ circ_ID + \"_primer\" + str(primer_index) + \"_\" + amplicon + \"\\n\") #",
"\"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_GC_PERCENT\")] # bowtie input file #",
"+'\\t' + REV_len + '\\t' + PRIMER_LEFT_TM + '\\t' + PRIMER_RIGHT_TM + '\\t'",
"= template[int(FWD_pos):int(REV_pos) + 1] all_amplicon.write(\"> amplicon_\" + circ_ID + \"_primer\" + str(primer_index) +",
"REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_REV\" + \"\\t\") primer_file.write(REV + \"\\t\"",
"# general primer file (for filtering), first put in dict, will be sorted",
"\"\\t\" + FWD + \"\\t\" + FWD_qual + \"\\n\") # write FWD +",
"args.i[0] primer_in = open(input_primers) primers = {} circ_info_keys = (\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\") #",
"\"\\t\" + REV + \"\\t\" + REV_qual + \"\\n\") # get amplicon and",
"all_amplicon all_primers_sorted = {k: v for k, v in sorted(all_primers_dict.items(), key=lambda item: item[1])}",
"+ \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" + REV + \"\\t\"",
"amplicon + '\\n'] = len(amplicon) # sort primers according to amp size (smallest",
"amplicon and make file for NUPACK FWD_pos, FWD_len = primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\") REV_pos, REV_len",
"+ \"\\n\") # get amplicon and make file for NUPACK FWD_pos, FWD_len =",
"put in dict, will be sorted (see below) all_primers_dict[circ_ID + \"\\t\" + chrom",
"= open(\"all_primers_\" + circ_ID + \".txt\", 'w') all_amplicon = open(\"amplicon_folding_input_\" + circ_ID +",
"+ 1] all_amplicon.write(\"> amplicon_\" + circ_ID + \"_primer\" + str(primer_index) + \"_\" +",
"PRIMER_RIGHT_GC_PERCENT + '\\t' + amplicon + '\\n'] = len(amplicon) # sort primers according",
"+ \"_SEQUENCE\")] FWD_qual = len(FWD) * \"I\" REV = primers[(\"PRIMER_RIGHT_\" + str(primer_index) +",
"+ '\\t' + PRIMER_LEFT_GC_PERCENT + '\\t' + PRIMER_RIGHT_GC_PERCENT + '\\t' + amplicon +",
"primers[\"SEQUENCE_ID\"] circ_ID, chrom, start, end = circRNA.split(\"_\") nr_p_out = primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close() # read",
"primers['PRIMER_LEFT_'+ str(primer_index)].split(\",\") REV_pos, REV_len = primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\") amplicon = template[int(FWD_pos):int(REV_pos) + 1] all_amplicon.write(\">",
"arguments to main primer_xc script') parser.add_argument('-i', nargs=1, required=True, help='input primer file') args =",
"# write FWD + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_REV\" +",
"file general_info = open(\"general_primer_design_info_\" + circ_ID + \".txt\", \"a\") for info in primers:",
"+ PRIMER_LEFT_TM + '\\t' + PRIMER_RIGHT_TM + '\\t' + PRIMER_LEFT_GC_PERCENT + '\\t' +",
"amplicon_\" + circ_ID + \"_primer\" + str(primer_index) + \"_\" + amplicon + \"\\n\")",
"be sorted (see below) all_primers_dict[circ_ID + \"\\t\" + chrom + \"\\t\" + start",
"general info into file general_info = open(\"general_primer_design_info_\" + circ_ID + \".txt\", \"a\") for",
"k, v in sorted(all_primers_dict.items(), key=lambda item: item[1])} for primer in all_primers_sorted: all_primers.write(primer) primer_file.close()",
"+ PRIMER_RIGHT_GC_PERCENT + '\\t' + amplicon + '\\n'] = len(amplicon) # sort primers",
"+ \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" + FWD + \"\\t\"",
"\"_primer_\" + str(primer_index) + \"_FWD_FWD\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual +",
"= len(REV) * \"I\" PRIMER_LEFT_TM = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_TM\")] PRIMER_RIGHT_TM =",
"= len(amplicon) # sort primers according to amp size (smallest is best) and",
"+ '\\t' + amplicon + '\\n'] = len(amplicon) # sort primers according to",
"= circRNA.split(\"_\") nr_p_out = primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close() # read general info into file general_info",
"size (smallest is best) and then print to all_amplicon all_primers_sorted = {k: v",
"\"\\t\" + FWD_qual + \"\\t\" + FWD + \"\\t\" + FWD_qual + \"\\n\")",
"primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" + REV + \"\\t\" + REV_qual",
"+ REV_pos +'\\t' + REV_len + '\\t' + PRIMER_LEFT_TM + '\\t' + PRIMER_RIGHT_TM",
"len(FWD) * \"I\" REV = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_SEQUENCE\")] REV_qual = len(REV)",
"input file # write FWD + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) +",
"FWD_qual + \"\\t\" + REV + \"\\t\" + REV_qual + \"\\n\") # write",
"str(primer_index) + \"_TM\")] PRIMER_LEFT_GC_PERCENT = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT = primers[(\"PRIMER_RIGHT_\"",
"FWD_qual + \"\\n\") # write FWD + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index)",
"+ FWD_qual + \"\\n\") # write REV + REV primer_file.write(circ_ID + \"_primer_\" +",
"'\\n'] = len(amplicon) # sort primers according to amp size (smallest is best)",
"general_info = open(\"general_primer_design_info_\" + circ_ID + \".txt\", \"a\") for info in primers: if",
"\"\\t\" + FWD_qual + \"\\n\") # write FWD + FWD primer_file.write(circ_ID + \"_primer_\"",
"+ PRIMER_RIGHT_TM + '\\t' + PRIMER_LEFT_GC_PERCENT + '\\t' + PRIMER_RIGHT_GC_PERCENT + '\\t' +",
"\"\\t\" + REV_qual + \"\\n\") # write REV + FWD primer_file.write(circ_ID + \"_primer_\"",
"\"_FWD_REV\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" + REV +",
"write REV + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_REV\" + \"\\t\")",
"REV + '\\t' + FWD_pos + '\\t' + FWD_len + '\\t' + REV_pos",
"+ FWD + '\\t' + REV + '\\t' + FWD_pos + '\\t' +",
"PRIMER_LEFT_GC_PERCENT + '\\t' + PRIMER_RIGHT_GC_PERCENT + '\\t' + amplicon + '\\n'] = len(amplicon)",
"len(REV) * \"I\" PRIMER_LEFT_TM = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_TM\")] PRIMER_RIGHT_TM = primers[(\"PRIMER_RIGHT_\"",
"primer_in = open(input_primers) primers = {} circ_info_keys = (\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\") # read",
"+ \"\\n\") # general primer file (for filtering), first put in dict, will",
"\"_EXPLAIN\" in info or any(x in info for x in circ_info_keys): general_info.write(info +",
"info for x in circ_info_keys): general_info.write(info + '=' + str(primers[info]) +'\\n') general_info.close() #",
"parser.parse_args() input_primers = args.i[0] primer_in = open(input_primers) primers = {} circ_info_keys = (\"SEQUENCE_ID\",",
"= {} for primer_index in range(int(nr_p_out)): FWD = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_SEQUENCE\")]",
"{} for primer_index in range(int(nr_p_out)): FWD = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_SEQUENCE\")] FWD_qual",
"primers all_primers = open(\"all_primers_\" + circ_ID + \".txt\", 'w') all_amplicon = open(\"amplicon_folding_input_\" +",
"FWD_len + '\\t' + REV_pos +'\\t' + REV_len + '\\t' + PRIMER_LEFT_TM +",
"into file general_info = open(\"general_primer_design_info_\" + circ_ID + \".txt\", \"a\") for info in",
"\"_\" + amplicon + \"\\n\") # general primer file (for filtering), first put",
"+ REV + \"\\t\" + REV_qual + \"\\n\") # get amplicon and make",
"primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_REV\" + \"\\t\") primer_file.write(REV + \"\\t\" +",
"info in primers: if \"_NUM_\" in info or \"_EXPLAIN\" in info or any(x",
"FWD + \"\\t\" + FWD_qual + \"\\n\") # write FWD + FWD primer_file.write(circ_ID",
"str(primer_index)].split(\",\") amplicon = template[int(FWD_pos):int(REV_pos) + 1] all_amplicon.write(\"> amplicon_\" + circ_ID + \"_primer\" +",
"file # write FWD + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_REV\"",
"+ \".txt\", 'w') all_primers_dict = {} for primer_index in range(int(nr_p_out)): FWD = primers[(\"PRIMER_LEFT_\"",
"'\\t' + amplicon + '\\n'] = len(amplicon) # sort primers according to amp",
"\"_GC_PERCENT\")] # bowtie input file # write FWD + REV primer_file.write(circ_ID + \"_primer_\"",
"general_info.write(info + '=' + str(primers[info]) +'\\n') general_info.close() # make file for bowtie primer_file",
"REV_qual = len(REV) * \"I\" PRIMER_LEFT_TM = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_TM\")] PRIMER_RIGHT_TM",
"primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" + REV + \"\\t\" + REV_qual",
"list primers all_primers = open(\"all_primers_\" + circ_ID + \".txt\", 'w') all_amplicon = open(\"amplicon_folding_input_\"",
"argparse.ArgumentParser(description='give arguments to main primer_xc script') parser.add_argument('-i', nargs=1, required=True, help='input primer file') args",
"amp size (smallest is best) and then print to all_amplicon all_primers_sorted = {k:",
"= primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_GC_PERCENT\")]",
"'\\t' + FWD + '\\t' + REV + '\\t' + FWD_pos + '\\t'",
"+ '\\t' + FWD_pos + '\\t' + FWD_len + '\\t' + REV_pos +'\\t'",
"amplicon = template[int(FWD_pos):int(REV_pos) + 1] all_amplicon.write(\"> amplicon_\" + circ_ID + \"_primer\" + str(primer_index)",
"'\\t' + PRIMER_LEFT_TM + '\\t' + PRIMER_RIGHT_TM + '\\t' + PRIMER_LEFT_GC_PERCENT + '\\t'",
"primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_SEQUENCE\")] FWD_qual = len(FWD) * \"I\" REV = primers[(\"PRIMER_RIGHT_\"",
"(for filtering), first put in dict, will be sorted (see below) all_primers_dict[circ_ID +",
"+ \"\\t\" + REV_qual + \"\\n\") # write REV + FWD primer_file.write(circ_ID +",
"= line.split(\"=\") value = value.rstrip() primers[key] = value template = primers[\"SEQUENCE_TEMPLATE\"] circRNA =",
"primers according to amp size (smallest is best) and then print to all_amplicon",
"+ amplicon + \"\\n\") # general primer file (for filtering), first put in",
"str(primer_index) + \"_REV_REV\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" +",
"REV_pos +'\\t' + REV_len + '\\t' + PRIMER_LEFT_TM + '\\t' + PRIMER_RIGHT_TM +",
"<gh_stars>0 #!/usr/bin/python3 import argparse parser = argparse.ArgumentParser(description='give arguments to main primer_xc script') parser.add_argument('-i',",
"+ \"\\t\" + FWD_qual + \"\\t\" + REV + \"\\t\" + REV_qual +",
"argparse parser = argparse.ArgumentParser(description='give arguments to main primer_xc script') parser.add_argument('-i', nargs=1, required=True, help='input",
"+ '\\t' + str(primer_index) + '\\t' + FWD + '\\t' + REV +",
"REV + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_FWD\" + \"\\t\") primer_file.write(REV",
"\"\\n\") # write FWD + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_FWD\"",
"+ \"_SEQUENCE\")] REV_qual = len(REV) * \"I\" PRIMER_LEFT_TM = primers[(\"PRIMER_LEFT_\" + str(primer_index) +",
"\"\\t\" + chrom + \"\\t\" + start + \"\\t\" + end + '\\t'",
"+ '\\t' + FWD + '\\t' + REV + '\\t' + FWD_pos +",
"{} circ_info_keys = (\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\") # read all info into dictionary for",
"+ '\\t' + FWD_len + '\\t' + REV_pos +'\\t' + REV_len + '\\t'",
"circ_info_keys): general_info.write(info + '=' + str(primers[info]) +'\\n') general_info.close() # make file for bowtie",
"file for bowtie primer_file = open(\"primer_spec_input_\" + circ_ID + \".txt\", \"a\") # make",
"primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_SEQUENCE\")] REV_qual = len(REV) * \"I\" PRIMER_LEFT_TM = primers[(\"PRIMER_LEFT_\"",
"+ \"_primer_\" + str(primer_index) + \"_FWD_REV\" + \"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual",
"circ_info_keys = (\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\") # read all info into dictionary for line",
"+ \"\\t\" + REV_qual + \"\\n\") # get amplicon and make file for",
"\"\\t\" + end + '\\t' + str(primer_index) + '\\t' + FWD + '\\t'",
"circRNA = primers[\"SEQUENCE_ID\"] circ_ID, chrom, start, end = circRNA.split(\"_\") nr_p_out = primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close()",
"PRIMER_LEFT_TM = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_TM\")] PRIMER_RIGHT_TM = primers[(\"PRIMER_RIGHT_\" + str(primer_index) +",
"primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_GC_PERCENT\")] #",
"script') parser.add_argument('-i', nargs=1, required=True, help='input primer file') args = parser.parse_args() input_primers = args.i[0]",
"\".txt\", 'w') all_amplicon = open(\"amplicon_folding_input_\" + circ_ID + \".txt\", 'w') all_primers_dict = {}",
"+ str(primer_index) + \"_TM\")] PRIMER_RIGHT_TM = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_TM\")] PRIMER_LEFT_GC_PERCENT =",
"len(amplicon) # sort primers according to amp size (smallest is best) and then",
"'=' + str(primers[info]) +'\\n') general_info.close() # make file for bowtie primer_file = open(\"primer_spec_input_\"",
"best) and then print to all_amplicon all_primers_sorted = {k: v for k, v",
"and then print to all_amplicon all_primers_sorted = {k: v for k, v in",
"# write REV + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_REV\" +",
"open(\"general_primer_design_info_\" + circ_ID + \".txt\", \"a\") for info in primers: if \"_NUM_\" in",
"or any(x in info for x in circ_info_keys): general_info.write(info + '=' + str(primers[info])",
"+ str(primer_index) + '\\t' + FWD + '\\t' + REV + '\\t' +",
"main primer_xc script') parser.add_argument('-i', nargs=1, required=True, help='input primer file') args = parser.parse_args() input_primers",
"+ REV_qual + \"\\t\" + FWD + \"\\t\" + FWD_qual + \"\\n\") #",
"+ PRIMER_LEFT_GC_PERCENT + '\\t' + PRIMER_RIGHT_GC_PERCENT + '\\t' + amplicon + '\\n'] =",
"= open(\"general_primer_design_info_\" + circ_ID + \".txt\", \"a\") for info in primers: if \"_NUM_\"",
"+ FWD_qual + \"\\t\" + REV + \"\\t\" + REV_qual + \"\\n\") #",
"# make file for bowtie primer_file = open(\"primer_spec_input_\" + circ_ID + \".txt\", \"a\")",
"str(primer_index) + \"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_GC_PERCENT\")] # bowtie input",
"+ \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" + FWD + \"\\t\"",
"= primers[\"SEQUENCE_ID\"] circ_ID, chrom, start, end = circRNA.split(\"_\") nr_p_out = primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close() #",
"+ str(primers[info]) +'\\n') general_info.close() # make file for bowtie primer_file = open(\"primer_spec_input_\" +",
"primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" + FWD + \"\\t\" + FWD_qual",
"REV_qual + \"\\t\" + REV + \"\\t\" + REV_qual + \"\\n\") # get",
"primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_GC_PERCENT\")] # bowtie input file # write FWD +",
"+ \"_\" + amplicon + \"\\n\") # general primer file (for filtering), first",
"\"_SEQUENCE\")] FWD_qual = len(FWD) * \"I\" REV = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_SEQUENCE\")]",
"+ circ_ID + \".txt\", 'w') all_primers_dict = {} for primer_index in range(int(nr_p_out)): FWD",
"in info or \"_EXPLAIN\" in info or any(x in info for x in",
"read general info into file general_info = open(\"general_primer_design_info_\" + circ_ID + \".txt\", \"a\")",
"= len(FWD) * \"I\" REV = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_SEQUENCE\")] REV_qual =",
"= primers[\"SEQUENCE_TEMPLATE\"] circRNA = primers[\"SEQUENCE_ID\"] circ_ID, chrom, start, end = circRNA.split(\"_\") nr_p_out =",
"all_primers_sorted = {k: v for k, v in sorted(all_primers_dict.items(), key=lambda item: item[1])} for",
"+ circ_ID + \".txt\", \"a\") for info in primers: if \"_NUM_\" in info",
"+ \"\\t\" + FWD + \"\\t\" + FWD_qual + \"\\n\") # write FWD",
"= open(\"primer_spec_input_\" + circ_ID + \".txt\", \"a\") # make general file with list",
"REV_qual + \"\\n\") # write REV + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index)",
"if \"_NUM_\" in info or \"_EXPLAIN\" in info or any(x in info for",
"all info into dictionary for line in primer_in: key, value = line.split(\"=\") value",
"circ_ID + \".txt\", 'w') all_amplicon = open(\"amplicon_folding_input_\" + circ_ID + \".txt\", 'w') all_primers_dict",
"FWD + \"\\t\" + FWD_qual + \"\\n\") # write REV + REV primer_file.write(circ_ID",
"FWD + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_FWD\" + \"\\t\") primer_file.write(FWD",
"primer_in: key, value = line.split(\"=\") value = value.rstrip() primers[key] = value template =",
"print to all_amplicon all_primers_sorted = {k: v for k, v in sorted(all_primers_dict.items(), key=lambda",
"str(primer_index) + '\\t' + FWD + '\\t' + REV + '\\t' + FWD_pos",
"'\\t' + str(primer_index) + '\\t' + FWD + '\\t' + REV + '\\t'",
"import argparse parser = argparse.ArgumentParser(description='give arguments to main primer_xc script') parser.add_argument('-i', nargs=1, required=True,",
"primer_file = open(\"primer_spec_input_\" + circ_ID + \".txt\", \"a\") # make general file with",
"= (\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\") # read all info into dictionary for line in",
"+ \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" + REV + \"\\t\"",
"to main primer_xc script') parser.add_argument('-i', nargs=1, required=True, help='input primer file') args = parser.parse_args()",
"+ \"\\t\" + REV + \"\\t\" + REV_qual + \"\\n\") # write REV",
"\"\\t\" + REV_qual + \"\\t\" + REV + \"\\t\" + REV_qual + \"\\n\")",
"\"I\" REV = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_SEQUENCE\")] REV_qual = len(REV) * \"I\"",
"end + '\\t' + str(primer_index) + '\\t' + FWD + '\\t' + REV",
"line.split(\"=\") value = value.rstrip() primers[key] = value template = primers[\"SEQUENCE_TEMPLATE\"] circRNA = primers[\"SEQUENCE_ID\"]",
"open(\"primer_spec_input_\" + circ_ID + \".txt\", \"a\") # make general file with list primers",
"str(primer_index) + \"_SEQUENCE\")] FWD_qual = len(FWD) * \"I\" REV = primers[(\"PRIMER_RIGHT_\" + str(primer_index)",
"general_info.close() # make file for bowtie primer_file = open(\"primer_spec_input_\" + circ_ID + \".txt\",",
"= primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_SEQUENCE\")] REV_qual = len(REV) * \"I\" PRIMER_LEFT_TM =",
"+'\\n') general_info.close() # make file for bowtie primer_file = open(\"primer_spec_input_\" + circ_ID +",
"sort primers according to amp size (smallest is best) and then print to",
"with list primers all_primers = open(\"all_primers_\" + circ_ID + \".txt\", 'w') all_amplicon =",
"bowtie input file # write FWD + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index)",
"sorted (see below) all_primers_dict[circ_ID + \"\\t\" + chrom + \"\\t\" + start +",
"primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_REV\" + \"\\t\") primer_file.write(FWD + \"\\t\" +",
"for line in primer_in: key, value = line.split(\"=\") value = value.rstrip() primers[key] =",
"\"_NUM_\" in info or \"_EXPLAIN\" in info or any(x in info for x",
"+ '=' + str(primers[info]) +'\\n') general_info.close() # make file for bowtie primer_file =",
"\"\\t\" + start + \"\\t\" + end + '\\t' + str(primer_index) + '\\t'",
"+ '\\t' + REV + '\\t' + FWD_pos + '\\t' + FWD_len +",
"in circ_info_keys): general_info.write(info + '=' + str(primers[info]) +'\\n') general_info.close() # make file for",
"primers = {} circ_info_keys = (\"SEQUENCE_ID\", \"SEQUENCE_TEMPLATE\", \"SEQUENCE_TARGET\") # read all info into",
"# write FWD + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_FWD\" +",
"= argparse.ArgumentParser(description='give arguments to main primer_xc script') parser.add_argument('-i', nargs=1, required=True, help='input primer file')",
"\".txt\", \"a\") for info in primers: if \"_NUM_\" in info or \"_EXPLAIN\" in",
"required=True, help='input primer file') args = parser.parse_args() input_primers = args.i[0] primer_in = open(input_primers)",
"+ \"\\t\" + REV + \"\\t\" + REV_qual + \"\\n\") # get amplicon",
"+ \"_primer_\" + str(primer_index) + \"_REV_REV\" + \"\\t\") primer_file.write(REV + \"\\t\" + REV_qual",
"\"SEQUENCE_TARGET\") # read all info into dictionary for line in primer_in: key, value",
"\"\\t\" + REV_qual + \"\\t\" + FWD + \"\\t\" + FWD_qual + \"\\n\")",
"template = primers[\"SEQUENCE_TEMPLATE\"] circRNA = primers[\"SEQUENCE_ID\"] circ_ID, chrom, start, end = circRNA.split(\"_\") nr_p_out",
"1] all_amplicon.write(\"> amplicon_\" + circ_ID + \"_primer\" + str(primer_index) + \"_\" + amplicon",
"= value.rstrip() primers[key] = value template = primers[\"SEQUENCE_TEMPLATE\"] circRNA = primers[\"SEQUENCE_ID\"] circ_ID, chrom,",
"primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_TM\")] PRIMER_RIGHT_TM = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_TM\")] PRIMER_LEFT_GC_PERCENT",
"REV_len = primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\") amplicon = template[int(FWD_pos):int(REV_pos) + 1] all_amplicon.write(\"> amplicon_\" + circ_ID",
"for k, v in sorted(all_primers_dict.items(), key=lambda item: item[1])} for primer in all_primers_sorted: all_primers.write(primer)",
"+ FWD + \"\\t\" + FWD_qual + \"\\n\") # write REV + REV",
"v for k, v in sorted(all_primers_dict.items(), key=lambda item: item[1])} for primer in all_primers_sorted:",
"REV_pos, REV_len = primers['PRIMER_RIGHT_'+ str(primer_index)].split(\",\") amplicon = template[int(FWD_pos):int(REV_pos) + 1] all_amplicon.write(\"> amplicon_\" +",
"primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_FWD\" + \"\\t\") primer_file.write(FWD + \"\\t\" +",
"= primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close() # read general info into file general_info = open(\"general_primer_design_info_\" +",
"open(\"amplicon_folding_input_\" + circ_ID + \".txt\", 'w') all_primers_dict = {} for primer_index in range(int(nr_p_out)):",
"# make general file with list primers all_primers = open(\"all_primers_\" + circ_ID +",
"+ circ_ID + \".txt\", \"a\") # make general file with list primers all_primers",
"PRIMER_RIGHT_GC_PERCENT = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_GC_PERCENT\")] # bowtie input file # write",
"info into file general_info = open(\"general_primer_design_info_\" + circ_ID + \".txt\", \"a\") for info",
"for primer_index in range(int(nr_p_out)): FWD = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_SEQUENCE\")] FWD_qual =",
"FWD + '\\t' + REV + '\\t' + FWD_pos + '\\t' + FWD_len",
"+ \"\\t\" + REV_qual + \"\\t\" + REV + \"\\t\" + REV_qual +",
"+ \"\\t\" + FWD_qual + \"\\t\" + FWD + \"\\t\" + FWD_qual +",
"FWD_qual + \"\\n\") # write REV + REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index)",
"according to amp size (smallest is best) and then print to all_amplicon all_primers_sorted",
"\"\\t\") primer_file.write(FWD + \"\\t\" + FWD_qual + \"\\t\" + FWD + \"\\t\" +",
"+ \"\\t\" + REV_qual + \"\\t\" + FWD + \"\\t\" + FWD_qual +",
"+ start + \"\\t\" + end + '\\t' + str(primer_index) + '\\t' +",
"all_primers = open(\"all_primers_\" + circ_ID + \".txt\", 'w') all_amplicon = open(\"amplicon_folding_input_\" + circ_ID",
"primers[\"PRIMER_LEFT_NUM_RETURNED\"] primer_in.close() # read general info into file general_info = open(\"general_primer_design_info_\" + circ_ID",
"'\\t' + PRIMER_RIGHT_TM + '\\t' + PRIMER_LEFT_GC_PERCENT + '\\t' + PRIMER_RIGHT_GC_PERCENT + '\\t'",
"+ FWD_qual + \"\\n\") # write FWD + FWD primer_file.write(circ_ID + \"_primer_\" +",
"\"_TM\")] PRIMER_RIGHT_TM = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_TM\")] PRIMER_LEFT_GC_PERCENT = primers[(\"PRIMER_LEFT_\" + str(primer_index)",
"PRIMER_RIGHT_TM = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_TM\")] PRIMER_LEFT_GC_PERCENT = primers[(\"PRIMER_LEFT_\" + str(primer_index) +",
"REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_REV\" + \"\\t\") primer_file.write(FWD + \"\\t\"",
"value = line.split(\"=\") value = value.rstrip() primers[key] = value template = primers[\"SEQUENCE_TEMPLATE\"] circRNA",
"dict, will be sorted (see below) all_primers_dict[circ_ID + \"\\t\" + chrom + \"\\t\"",
"FWD_qual = len(FWD) * \"I\" REV = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_SEQUENCE\")] REV_qual",
"+ \"_TM\")] PRIMER_LEFT_GC_PERCENT = primers[(\"PRIMER_LEFT_\" + str(primer_index) + \"_GC_PERCENT\")] PRIMER_RIGHT_GC_PERCENT = primers[(\"PRIMER_RIGHT_\" +",
"str(primer_index) + \"_TM\")] PRIMER_RIGHT_TM = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_TM\")] PRIMER_LEFT_GC_PERCENT = primers[(\"PRIMER_LEFT_\"",
"start + \"\\t\" + end + '\\t' + str(primer_index) + '\\t' + FWD",
"'\\t' + REV + '\\t' + FWD_pos + '\\t' + FWD_len + '\\t'",
"general file with list primers all_primers = open(\"all_primers_\" + circ_ID + \".txt\", 'w')",
"REV + \"\\t\" + REV_qual + \"\\n\") # write REV + FWD primer_file.write(circ_ID",
"primer_file.write(REV + \"\\t\" + REV_qual + \"\\t\" + FWD + \"\\t\" + FWD_qual",
"v in sorted(all_primers_dict.items(), key=lambda item: item[1])} for primer in all_primers_sorted: all_primers.write(primer) primer_file.close() all_primers.close()",
"parser = argparse.ArgumentParser(description='give arguments to main primer_xc script') parser.add_argument('-i', nargs=1, required=True, help='input primer",
"+ REV primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_FWD_REV\" + \"\\t\") primer_file.write(FWD +",
"is best) and then print to all_amplicon all_primers_sorted = {k: v for k,",
"# write REV + FWD primer_file.write(circ_ID + \"_primer_\" + str(primer_index) + \"_REV_FWD\" +",
"will be sorted (see below) all_primers_dict[circ_ID + \"\\t\" + chrom + \"\\t\" +",
"* \"I\" REV = primers[(\"PRIMER_RIGHT_\" + str(primer_index) + \"_SEQUENCE\")] REV_qual = len(REV) *"
] |
[
"print(f'Finish {rmse.shape[0]} run') return rmse # END run_source_lsq() # read parameter distributions datapath",
"of models and veneer list project_name = 'MW_BASE_RC10.rsproj' veneer_name = 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies",
"== 'monthly': sum_126001A = df.resample('M').sum() else: month_126001A = df.resample('M').sum() sum_126001A = pd.DataFrame(index =",
"pyapprox.adaptive_sparse_grid import max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth from pyapprox.variable_transformations",
"the sum of timeseries of different temporal scale. temp_scale: str, default is 'Y',",
"i] # set the time period of the results retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')]",
"= 'MW_BASE_RC10.rsproj' veneer_name = 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies = 8 _, things_to_record, _, _,",
"time period of the results retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] # define the modeling",
"= pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year), columns=df.columns) for i in range(sum_126001A.shape[0]): sum_126001A.iloc[i, :] =",
"= 10000 candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) # Define criteria",
"# obtain the sum at a given temporal scale # din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]],",
"the parameter values to initial values for vs in vs_list: vs = change_param_values(vs,",
"\"wb\")) # set the parameter values to initial values for vs in vs_list:",
"emulator pce.build() # store PCE import pickle pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\")) # set the",
"sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum() return sum_126001A # End timeseries_sum() # import",
"of different temporal scale. temp_scale: str, default is 'Y', monthly using 'M' \"\"\"",
"the output.txt requires the use of obs. date_range = pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din =",
"obs_din = pd.DataFrame(obs_din,dtype='float').values # breakpoint() resid = din_126001A - obs_din rmse = (np.mean(resid",
"a given temporal scale # din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A = timeseries_sum(din, temp_scale",
"output.txt requires the use of obs. date_range = pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv',",
"modeling_settings, generate_observation_ensemble import spotpy as sp print('Read Parameters') parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') #",
"para_info = pd.read_csv(datapath + 'Parameters-PCE.csv') # define the variables for PCE param_file =",
"month_126001A = df.resample('M').sum() sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year), columns=df.columns) for i in",
"funcs.modeling_funcs import modeling_settings, generate_observation_ensemble import spotpy as sp print('Read Parameters') parameters = pd.read_csv('../data/Parameters-PCE.csv',",
"the time period of the results retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] # define the",
"scale given is not supported.' if temp_scale == 'monthly': sum_126001A = df.resample('M').sum() else:",
"max_level = 6 err_tol = 1e-8 max_num_samples = 100 max_level_1d = [max_level]*(pce.num_vars) admissibility_function",
"product_uniform='uniform', dummy=False) var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True) # Create PyApprox model n_candidate_samples = 10000",
"and veneer list project_name = 'MW_BASE_RC10.rsproj' veneer_name = 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies = 8",
"i in range(vars.shape[1]): parameter_df.iloc[i] = vars[:, i] # set the time period of",
"# Create the copy of models and veneer list project_name = 'MW_BASE_RC10.rsproj' veneer_name",
"= [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] # define the modeling period and the recording variables _,",
"pd.DataFrame(obs_din,dtype='float').values # breakpoint() resid = din_126001A - obs_din rmse = (np.mean(resid ** 2,",
"first_port=15000; num_copies = 8 _, things_to_record, _, _, _ = modeling_settings() processes, ports",
"max_level_admissibility_function, max_level, max_level_1d, max_num_samples, err_tol) refinement_indicator = variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions( refinement_indicator, admissibility_function,",
"PyApprox model n_candidate_samples = 10000 candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples)",
"list project_name = 'MW_BASE_RC10.rsproj' veneer_name = 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies = 8 _, things_to_record,",
"import pandas as pd from veneer.pest_runtime import * from veneer.manage import start,kill_all_now import",
"= 6 err_tol = 1e-8 max_num_samples = 100 max_level_1d = [max_level]*(pce.num_vars) admissibility_function =",
"Define criteria max_level = 6 err_tol = 1e-8 max_num_samples = 100 max_level_1d =",
"veneer.manage import start,kill_all_now import pyapprox as pya from functools import partial from pyapprox.adaptive_sparse_grid",
"or monthly loads def timeseries_sum(df, temp_scale = 'annual'): \"\"\" Obtain the sum of",
"import vs_settings, \\ modeling_settings, paralell_vs, obtain_initials, change_param_values # Create the copy of models",
"PCE param_file = file_settings()[-1] ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False) var_trans = AffineRandomVariableTransformation(variable,",
"Generate emulator pce.build() # store PCE import pickle pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\")) # set",
"'The temporal scale given is not supported.' if temp_scale == 'monthly': sum_126001A =",
"import partial from pyapprox.adaptive_sparse_grid import max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator from pyapprox.univariate_quadrature import",
"the sum at a given temporal scale # din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A",
"vs_list=vs_list): \"\"\" Script used to run_source and return the output file. The function",
"import pyapprox as pya from functools import partial from pyapprox.adaptive_sparse_grid import max_level_admissibility_function from",
"enforce_bounds=True) # Create PyApprox model n_candidate_samples = 10000 candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce",
"candidate_samples=candidate_samples) # Define criteria max_level = 6 err_tol = 1e-8 max_num_samples = 100",
"pce.build() # store PCE import pickle pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\")) # set the parameter",
"'MW_BASE_RC10.rsproj' veneer_name = 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies = 8 _, things_to_record, _, _, _",
"= sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A = timeseries_sum(din, temp_scale = 'annual') obs_din = timeseries_sum(observed_din, temp_scale",
"pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions( refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth ) # Generate emulator pce.build() # store",
"# Define objective functions # Use annual or monthly loads def timeseries_sum(df, temp_scale",
"observation if the output.txt requires the use of obs. date_range = pd.to_datetime(['2009/07/01', '2018/06/30'])",
"err_tol) refinement_indicator = variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions( refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth ) # Generate",
"1000 * x) # loop over the vars and try to use parallel",
"import observation if the output.txt requires the use of obs. date_range = pd.to_datetime(['2009/07/01',",
"from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble import spotpy as sp print('Read Parameters') parameters =",
"Use annual or monthly loads def timeseries_sum(df, temp_scale = 'annual'): \"\"\" Obtain the",
"timeseries_sum(observed_din, temp_scale = 'annual') din_126001A = pd.DataFrame(din_126001A,dtype='float').values obs_din = pd.DataFrame(obs_din,dtype='float').values # breakpoint() resid",
"criteria, start_date, end_date, parameter_df, retrieve_time) # obtain the sum at a given temporal",
"vars[:, i] # set the time period of the results retrieve_time = [pd.Timestamp('2009-07-01'),",
"= din_126001A - obs_din rmse = (np.mean(resid ** 2, axis=0)) ** 0.5 if",
"spotpy as sp print('Read Parameters') parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') # Define objective functions",
"file_settings()[-1] ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False) var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True) # Create",
"run_source_lsq() # read parameter distributions datapath = file_settings()[1] para_info = pd.read_csv(datapath + 'Parameters-PCE.csv')",
"is not supported.' if temp_scale == 'monthly': sum_126001A = df.resample('M').sum() else: month_126001A =",
"if temp_scale == 'monthly': sum_126001A = df.resample('M').sum() else: month_126001A = df.resample('M').sum() sum_126001A =",
"parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') # Define objective functions # Use annual or monthly",
"# Create PyApprox model n_candidate_samples = 10000 candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce =",
"temp_scale = 'annual') obs_din = timeseries_sum(observed_din, temp_scale = 'annual') din_126001A = pd.DataFrame(din_126001A,dtype='float').values obs_din",
"obs_din = timeseries_sum(observed_din, temp_scale = 'annual') din_126001A = pd.DataFrame(din_126001A,dtype='float').values obs_din = pd.DataFrame(obs_din,dtype='float').values #",
"function is called by AdaptiveLejaPCE. \"\"\" from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble import spotpy",
"modeling_settings() processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name) vs_list = vs_settings(ports, things_to_record) #",
"timeseries_sum(din, temp_scale = 'annual') obs_din = timeseries_sum(observed_din, temp_scale = 'annual') din_126001A = pd.DataFrame(din_126001A,dtype='float').values",
"read parameter distributions datapath = file_settings()[1] para_info = pd.read_csv(datapath + 'Parameters-PCE.csv') # define",
"= AffineRandomVariableTransformation(variable, enforce_bounds=True) # Create PyApprox model n_candidate_samples = 10000 candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(),",
"+ 'Parameters-PCE.csv') # define the variables for PCE param_file = file_settings()[-1] ind_vars, variable",
"veneer list project_name = 'MW_BASE_RC10.rsproj' veneer_name = 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies = 8 _,",
"i in range(sum_126001A.shape[0]): sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum() return sum_126001A # End",
"temp_scale: str, default is 'Y', monthly using 'M' \"\"\" assert temp_scale in ['monthly',",
"variance_pce_refinement_indicator from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth from pyapprox.variable_transformations import AffineRandomVariableTransformation from funcs.read_data import variables_prep,",
"[pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] # define the modeling period and the recording variables _, _,",
"_, _ = modeling_settings() processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name) vs_list =",
"import numpy as np import pandas as pd from veneer.pest_runtime import * from",
"observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index = pd.to_datetime(observed_din.index) observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000",
"-np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) # Define criteria max_level = 6 err_tol",
"rmse[0] == 0: rmse[0] = 1e-8 rmse = rmse.reshape(rmse.shape[0], 1) print(f'Finish {rmse.shape[0]} run')",
"run_source_lsq(vars, vs_list=vs_list): \"\"\" Script used to run_source and return the output file. The",
"as pya from functools import partial from pyapprox.adaptive_sparse_grid import max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos import",
"timeseries_sum(df, temp_scale = 'annual'): \"\"\" Obtain the sum of timeseries of different temporal",
"6 err_tol = 1e-8 max_num_samples = 100 max_level_1d = [max_level]*(pce.num_vars) admissibility_function = partial(",
"x: 1000 * x) # loop over the vars and try to use",
"rmse = rmse.reshape(rmse.shape[0], 1) print(f'Finish {rmse.shape[0]} run') return rmse # END run_source_lsq() #",
"10000 candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) # Define criteria max_level",
"is 'Y', monthly using 'M' \"\"\" assert temp_scale in ['monthly', 'annual'], 'The temporal",
"candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) # Define criteria max_level =",
"file_settings()[1] para_info = pd.read_csv(datapath + 'Parameters-PCE.csv') # define the variables for PCE param_file",
"the results retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] # define the modeling period and the",
"results retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] # define the modeling period and the recording",
"pickle pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\")) # set the parameter values to initial values for",
"set the parameter values to initial values for vs in vs_list: vs =",
"max_num_samples, err_tol) refinement_indicator = variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions( refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth ) #",
"admissibility_function, clenshaw_curtis_rule_growth ) # Generate emulator pce.build() # store PCE import pickle pickle.dump(pce,",
"veneer_name) vs_list = vs_settings(ports, things_to_record) # obtain the initial values of parameters initial_values",
"default is 'Y', monthly using 'M' \"\"\" assert temp_scale in ['monthly', 'annual'], 'The",
"1) print(f'Finish {rmse.shape[0]} run') return rmse # END run_source_lsq() # read parameter distributions",
"the initial values of parameters initial_values = obtain_initials(vs_list[0]) def run_source_lsq(vars, vs_list=vs_list): \"\"\" Script",
"= 1e-8 rmse = rmse.reshape(rmse.shape[0], 1) print(f'Finish {rmse.shape[0]} run') return rmse # END",
"partial from pyapprox.adaptive_sparse_grid import max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth",
"variables_prep(param_file, product_uniform='uniform', dummy=False) var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True) # Create PyApprox model n_candidate_samples =",
"of the results retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] # define the modeling period and",
"# set the parameter values to initial values for vs in vs_list: vs",
"and the recording variables _, _, criteria, start_date, end_date = modeling_settings() din =",
"['monthly', 'annual'], 'The temporal scale given is not supported.' if temp_scale == 'monthly':",
"# END run_source_lsq() # read parameter distributions datapath = file_settings()[1] para_info = pd.read_csv(datapath",
":] = month_126001A.iloc[i*12: (i+1)*12, :].sum() return sum_126001A # End timeseries_sum() # import observation",
"1e-8 rmse = rmse.reshape(rmse.shape[0], 1) print(f'Finish {rmse.shape[0]} run') return rmse # END run_source_lsq()",
"output file. The function is called by AdaptiveLejaPCE. \"\"\" from funcs.modeling_funcs import modeling_settings,",
"temp_scale == 'monthly': sum_126001A = df.resample('M').sum() else: month_126001A = df.resample('M').sum() sum_126001A = pd.DataFrame(index",
"file_settings from funcs.modeling_funcs import vs_settings, \\ modeling_settings, paralell_vs, obtain_initials, change_param_values # Create the",
"range(vars.shape[1]): parameter_df.iloc[i] = vars[:, i] # set the time period of the results",
"breakpoint() resid = din_126001A - obs_din rmse = (np.mean(resid ** 2, axis=0)) **",
"by AdaptiveLejaPCE. \"\"\" from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble import spotpy as sp print('Read",
"param_file = file_settings()[-1] ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False) var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True)",
"= pd.read_csv(datapath + 'Parameters-PCE.csv') # define the variables for PCE param_file = file_settings()[-1]",
"df.index[-1].year), columns=df.columns) for i in range(sum_126001A.shape[0]): sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum() return",
"obtain_initials, change_param_values # Create the copy of models and veneer list project_name =",
"change_param_values # Create the copy of models and veneer list project_name = 'MW_BASE_RC10.rsproj'",
"= month_126001A.iloc[i*12: (i+1)*12, :].sum() return sum_126001A # End timeseries_sum() # import observation if",
"pd.Timestamp('2018-06-30')] # define the modeling period and the recording variables _, _, criteria,",
"= -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) # Define criteria max_level = 6",
"obs_din rmse = (np.mean(resid ** 2, axis=0)) ** 0.5 if rmse[0] == 0:",
"pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) # Define criteria max_level = 6 err_tol = 1e-8 max_num_samples =",
"AffineRandomVariableTransformation from funcs.read_data import variables_prep, file_settings from funcs.modeling_funcs import vs_settings, \\ modeling_settings, paralell_vs,",
"temp_scale in ['monthly', 'annual'], 'The temporal scale given is not supported.' if temp_scale",
"= df.resample('M').sum() else: month_126001A = df.resample('M').sum() sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year), columns=df.columns)",
"temp_scale = 'annual'): \"\"\" Obtain the sum of timeseries of different temporal scale.",
"din_126001A - obs_din rmse = (np.mean(resid ** 2, axis=0)) ** 0.5 if rmse[0]",
"max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth from pyapprox.variable_transformations import AffineRandomVariableTransformation",
"# define the variables for PCE param_file = file_settings()[-1] ind_vars, variable = variables_prep(param_file,",
"temporal scale given is not supported.' if temp_scale == 'monthly': sum_126001A = df.resample('M').sum()",
"= partial( max_level_admissibility_function, max_level, max_level_1d, max_num_samples, err_tol) refinement_indicator = variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions(",
"paralell_vs(first_port, num_copies, project_name, veneer_name) vs_list = vs_settings(ports, things_to_record) # obtain the initial values",
"din = generate_observation_ensemble(vs_list, criteria, start_date, end_date, parameter_df, retrieve_time) # obtain the sum at",
"start,kill_all_now import pyapprox as pya from functools import partial from pyapprox.adaptive_sparse_grid import max_level_admissibility_function",
"and try to use parallel parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for i in range(vars.shape[1]):",
"df.resample('M').sum() else: month_126001A = df.resample('M').sum() sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year), columns=df.columns) for",
"values of parameters initial_values = obtain_initials(vs_list[0]) def run_source_lsq(vars, vs_list=vs_list): \"\"\" Script used to",
"return sum_126001A # End timeseries_sum() # import observation if the output.txt requires the",
"_ = modeling_settings() processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name) vs_list = vs_settings(ports,",
"of parameters initial_values = obtain_initials(vs_list[0]) def run_source_lsq(vars, vs_list=vs_list): \"\"\" Script used to run_source",
"not supported.' if temp_scale == 'monthly': sum_126001A = df.resample('M').sum() else: month_126001A = df.resample('M').sum()",
"the copy of models and veneer list project_name = 'MW_BASE_RC10.rsproj' veneer_name = 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe'",
"Parameters') parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') # Define objective functions # Use annual or",
"sp print('Read Parameters') parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') # Define objective functions # Use",
"open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\")) # set the parameter values to initial values for vs in",
"from funcs.modeling_funcs import vs_settings, \\ modeling_settings, paralell_vs, obtain_initials, change_param_values # Create the copy",
"din_126001A = pd.DataFrame(din_126001A,dtype='float').values obs_din = pd.DataFrame(obs_din,dtype='float').values # breakpoint() resid = din_126001A - obs_din",
"= timeseries_sum(observed_din, temp_scale = 'annual') din_126001A = pd.DataFrame(din_126001A,dtype='float').values obs_din = pd.DataFrame(obs_din,dtype='float').values # breakpoint()",
"variables_prep, file_settings from funcs.modeling_funcs import vs_settings, \\ modeling_settings, paralell_vs, obtain_initials, change_param_values # Create",
"= 'annual'): \"\"\" Obtain the sum of timeseries of different temporal scale. temp_scale:",
"# obtain the initial values of parameters initial_values = obtain_initials(vs_list[0]) def run_source_lsq(vars, vs_list=vs_list):",
"start_date, end_date, parameter_df, retrieve_time) # obtain the sum at a given temporal scale",
"var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True) # Create PyApprox model n_candidate_samples = 10000 candidate_samples =",
"if rmse[0] == 0: rmse[0] = 1e-8 rmse = rmse.reshape(rmse.shape[0], 1) print(f'Finish {rmse.shape[0]}",
"processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name) vs_list = vs_settings(ports, things_to_record) # obtain",
"date_range = pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index = pd.to_datetime(observed_din.index) observed_din =",
"= 'annual') din_126001A = pd.DataFrame(din_126001A,dtype='float').values obs_din = pd.DataFrame(obs_din,dtype='float').values # breakpoint() resid = din_126001A",
"'Parameters-PCE.csv') # define the variables for PCE param_file = file_settings()[-1] ind_vars, variable =",
"called by AdaptiveLejaPCE. \"\"\" from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble import spotpy as sp",
"of timeseries of different temporal scale. temp_scale: str, default is 'Y', monthly using",
"start_date, end_date = modeling_settings() din = generate_observation_ensemble(vs_list, criteria, start_date, end_date, parameter_df, retrieve_time) #",
"= file_settings()[-1] ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False) var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True) #",
"run_source and return the output file. The function is called by AdaptiveLejaPCE. \"\"\"",
"= 'annual') obs_din = timeseries_sum(observed_din, temp_scale = 'annual') din_126001A = pd.DataFrame(din_126001A,dtype='float').values obs_din =",
"np.arange(df.index[0].year, df.index[-1].year), columns=df.columns) for i in range(sum_126001A.shape[0]): sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum()",
"import variables_prep, file_settings from funcs.modeling_funcs import vs_settings, \\ modeling_settings, paralell_vs, obtain_initials, change_param_values #",
"return the output file. The function is called by AdaptiveLejaPCE. \"\"\" from funcs.modeling_funcs",
"clenshaw_curtis_rule_growth ) # Generate emulator pce.build() # store PCE import pickle pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl',",
"1e-8 max_num_samples = 100 max_level_1d = [max_level]*(pce.num_vars) admissibility_function = partial( max_level_admissibility_function, max_level, max_level_1d,",
"= vs_settings(ports, things_to_record) # obtain the initial values of parameters initial_values = obtain_initials(vs_list[0])",
"requires the use of obs. date_range = pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date')",
"End timeseries_sum() # import observation if the output.txt requires the use of obs.",
"pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index = pd.to_datetime(observed_din.index) observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x)",
"timeseries_sum() # import observation if the output.txt requires the use of obs. date_range",
"temp_scale = 'annual') din_126001A = pd.DataFrame(din_126001A,dtype='float').values obs_din = pd.DataFrame(obs_din,dtype='float').values # breakpoint() resid =",
"dummy=False) var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True) # Create PyApprox model n_candidate_samples = 10000 candidate_samples",
"= pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for i in range(vars.shape[1]): parameter_df.iloc[i] = vars[:, i] # set",
"variables _, _, criteria, start_date, end_date = modeling_settings() din = generate_observation_ensemble(vs_list, criteria, start_date,",
"Script used to run_source and return the output file. The function is called",
"define the modeling period and the recording variables _, _, criteria, start_date, end_date",
") # Generate emulator pce.build() # store PCE import pickle pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\"))",
"'annual'], 'The temporal scale given is not supported.' if temp_scale == 'monthly': sum_126001A",
"assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is not supported.' if",
"generate_observation_ensemble import spotpy as sp print('Read Parameters') parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') # Define",
"timeseries of different temporal scale. temp_scale: str, default is 'Y', monthly using 'M'",
"index_col='Date') observed_din.index = pd.to_datetime(observed_din.index) observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x) #",
"'M' \"\"\" assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is not",
"functions # Use annual or monthly loads def timeseries_sum(df, temp_scale = 'annual'): \"\"\"",
"sum_126001A # End timeseries_sum() # import observation if the output.txt requires the use",
"print('Read Parameters') parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') # Define objective functions # Use annual",
"'2018/06/30']) observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index = pd.to_datetime(observed_din.index) observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x:",
"columns=df.columns) for i in range(sum_126001A.shape[0]): sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum() return sum_126001A",
"supported.' if temp_scale == 'monthly': sum_126001A = df.resample('M').sum() else: month_126001A = df.resample('M').sum() sum_126001A",
"if the output.txt requires the use of obs. date_range = pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din",
"numpy as np import pandas as pd from veneer.pest_runtime import * from veneer.manage",
"store PCE import pickle pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\")) # set the parameter values to",
"import variance_pce_refinement_indicator from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth from pyapprox.variable_transformations import AffineRandomVariableTransformation from funcs.read_data import",
"return rmse # END run_source_lsq() # read parameter distributions datapath = file_settings()[1] para_info",
"vs_settings(ports, things_to_record) # obtain the initial values of parameters initial_values = obtain_initials(vs_list[0]) def",
"model n_candidate_samples = 10000 candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) #",
"'annual') din_126001A = pd.DataFrame(din_126001A,dtype='float').values obs_din = pd.DataFrame(obs_din,dtype='float').values # breakpoint() resid = din_126001A -",
"period of the results retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] # define the modeling period",
"- obs_din rmse = (np.mean(resid ** 2, axis=0)) ** 0.5 if rmse[0] ==",
"pd from veneer.pest_runtime import * from veneer.manage import start,kill_all_now import pyapprox as pya",
"modeling_settings, paralell_vs, obtain_initials, change_param_values # Create the copy of models and veneer list",
":].sum() return sum_126001A # End timeseries_sum() # import observation if the output.txt requires",
"resid = din_126001A - obs_din rmse = (np.mean(resid ** 2, axis=0)) ** 0.5",
"_, criteria, start_date, end_date = modeling_settings() din = generate_observation_ensemble(vs_list, criteria, start_date, end_date, parameter_df,",
"\"\"\" Script used to run_source and return the output file. The function is",
"_, _, criteria, start_date, end_date = modeling_settings() din = generate_observation_ensemble(vs_list, criteria, start_date, end_date,",
"= variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions( refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth ) # Generate emulator pce.build()",
"vs_list = vs_settings(ports, things_to_record) # obtain the initial values of parameters initial_values =",
"max_level_1d = [max_level]*(pce.num_vars) admissibility_function = partial( max_level_admissibility_function, max_level, max_level_1d, max_num_samples, err_tol) refinement_indicator =",
"period and the recording variables _, _, criteria, start_date, end_date = modeling_settings() din",
"generate_observation_ensemble(vs_list, criteria, start_date, end_date, parameter_df, retrieve_time) # obtain the sum at a given",
"funcs.read_data import variables_prep, file_settings from funcs.modeling_funcs import vs_settings, \\ modeling_settings, paralell_vs, obtain_initials, change_param_values",
"\"\"\" from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble import spotpy as sp print('Read Parameters') parameters",
"from veneer.pest_runtime import * from veneer.manage import start,kill_all_now import pyapprox as pya from",
"vars and try to use parallel parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for i in",
"from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth from pyapprox.variable_transformations import AffineRandomVariableTransformation from funcs.read_data import variables_prep, file_settings",
"objective functions # Use annual or monthly loads def timeseries_sum(df, temp_scale = 'annual'):",
"to use parallel parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for i in range(vars.shape[1]): parameter_df.iloc[i] =",
"'Y', monthly using 'M' \"\"\" assert temp_scale in ['monthly', 'annual'], 'The temporal scale",
"in ['monthly', 'annual'], 'The temporal scale given is not supported.' if temp_scale ==",
"parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for i in range(vars.shape[1]): parameter_df.iloc[i] = vars[:, i] #",
"recording variables _, _, criteria, start_date, end_date = modeling_settings() din = generate_observation_ensemble(vs_list, criteria,",
"the use of obs. date_range = pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index",
"use of obs. date_range = pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index =",
"str, default is 'Y', monthly using 'M' \"\"\" assert temp_scale in ['monthly', 'annual'],",
"pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') # Define objective functions # Use annual or monthly loads def",
"100 max_level_1d = [max_level]*(pce.num_vars) admissibility_function = partial( max_level_admissibility_function, max_level, max_level_1d, max_num_samples, err_tol) refinement_indicator",
"* from veneer.manage import start,kill_all_now import pyapprox as pya from functools import partial",
"refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth ) # Generate emulator pce.build() # store PCE import pickle",
"parallel parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for i in range(vars.shape[1]): parameter_df.iloc[i] = vars[:, i]",
"at a given temporal scale # din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A = timeseries_sum(din,",
"the vars and try to use parallel parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for i",
"the recording variables _, _, criteria, start_date, end_date = modeling_settings() din = generate_observation_ensemble(vs_list,",
"parameter_df, retrieve_time) # obtain the sum at a given temporal scale # din_pbias",
"END run_source_lsq() # read parameter distributions datapath = file_settings()[1] para_info = pd.read_csv(datapath +",
"for PCE param_file = file_settings()[-1] ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False) var_trans =",
"pyapprox as pya from functools import partial from pyapprox.adaptive_sparse_grid import max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos",
"scale # din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A = timeseries_sum(din, temp_scale = 'annual') obs_din",
"axis=0)) ** 0.5 if rmse[0] == 0: rmse[0] = 1e-8 rmse = rmse.reshape(rmse.shape[0],",
"from pyapprox.adaptive_sparse_grid import max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth from",
"num_copies, project_name, veneer_name) vs_list = vs_settings(ports, things_to_record) # obtain the initial values of",
"set the time period of the results retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] # define",
"# din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A = timeseries_sum(din, temp_scale = 'annual') obs_din =",
"[max_level]*(pce.num_vars) admissibility_function = partial( max_level_admissibility_function, max_level, max_level_1d, max_num_samples, err_tol) refinement_indicator = variance_pce_refinement_indicator pce.set_function(run_source_lsq,",
"retrieve_time) # obtain the sum at a given temporal scale # din_pbias =",
"\"\"\" Obtain the sum of timeseries of different temporal scale. temp_scale: str, default",
"sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A = timeseries_sum(din, temp_scale = 'annual') obs_din = timeseries_sum(observed_din, temp_scale =",
"admissibility_function = partial( max_level_admissibility_function, max_level, max_level_1d, max_num_samples, err_tol) refinement_indicator = variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans)",
"= 8 _, things_to_record, _, _, _ = modeling_settings() processes, ports = paralell_vs(first_port,",
"din[column_names[0]]) din_126001A = timeseries_sum(din, temp_scale = 'annual') obs_din = timeseries_sum(observed_din, temp_scale = 'annual')",
"'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies = 8 _, things_to_record, _, _, _ = modeling_settings() processes,",
"import max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth from pyapprox.variable_transformations import",
"{rmse.shape[0]} run') return rmse # END run_source_lsq() # read parameter distributions datapath =",
"rmse # END run_source_lsq() # read parameter distributions datapath = file_settings()[1] para_info =",
"month_126001A.iloc[i*12: (i+1)*12, :].sum() return sum_126001A # End timeseries_sum() # import observation if the",
"* x) # loop over the vars and try to use parallel parameter_df",
"PCE import pickle pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\")) # set the parameter values to initial",
"paralell_vs, obtain_initials, change_param_values # Create the copy of models and veneer list project_name",
"annual or monthly loads def timeseries_sum(df, temp_scale = 'annual'): \"\"\" Obtain the sum",
"temporal scale. temp_scale: str, default is 'Y', monthly using 'M' \"\"\" assert temp_scale",
"project_name, veneer_name) vs_list = vs_settings(ports, things_to_record) # obtain the initial values of parameters",
"(i+1)*12, :].sum() return sum_126001A # End timeseries_sum() # import observation if the output.txt",
"= df.resample('M').sum() sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year), columns=df.columns) for i in range(sum_126001A.shape[0]):",
"# loop over the vars and try to use parallel parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]),",
"= (np.mean(resid ** 2, axis=0)) ** 0.5 if rmse[0] == 0: rmse[0] =",
"\\ modeling_settings, paralell_vs, obtain_initials, change_param_values # Create the copy of models and veneer",
"df.resample('M').sum() sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year), columns=df.columns) for i in range(sum_126001A.shape[0]): sum_126001A.iloc[i,",
"for i in range(vars.shape[1]): parameter_df.iloc[i] = vars[:, i] # set the time period",
"given temporal scale # din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A = timeseries_sum(din, temp_scale =",
":].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x) # loop over the vars and try to",
"= pd.to_datetime(observed_din.index) observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x) # loop over",
"== 0: rmse[0] = 1e-8 rmse = rmse.reshape(rmse.shape[0], 1) print(f'Finish {rmse.shape[0]} run') return",
"to initial values for vs in vs_list: vs = change_param_values(vs, initial_values, fromList=True) kill_all_now(processes)",
"_, things_to_record, _, _, _ = modeling_settings() processes, ports = paralell_vs(first_port, num_copies, project_name,",
"as sp print('Read Parameters') parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') # Define objective functions #",
"din_126001A = timeseries_sum(din, temp_scale = 'annual') obs_din = timeseries_sum(observed_din, temp_scale = 'annual') din_126001A",
"observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x) # loop over the vars and try",
"initial_values = obtain_initials(vs_list[0]) def run_source_lsq(vars, vs_list=vs_list): \"\"\" Script used to run_source and return",
"from functools import partial from pyapprox.adaptive_sparse_grid import max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator from",
"var_trans) pce.set_refinement_functions( refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth ) # Generate emulator pce.build() # store PCE",
"# define the modeling period and the recording variables _, _, criteria, start_date,",
"pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth from pyapprox.variable_transformations import AffineRandomVariableTransformation from funcs.read_data",
"= np.arange(df.index[0].year, df.index[-1].year), columns=df.columns) for i in range(sum_126001A.shape[0]): sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12,",
"Create PyApprox model n_candidate_samples = 10000 candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce = pya.AdaptiveLejaPCE(var_trans.num_vars(),",
"from pyapprox.variable_transformations import AffineRandomVariableTransformation from funcs.read_data import variables_prep, file_settings from funcs.modeling_funcs import vs_settings,",
"and return the output file. The function is called by AdaptiveLejaPCE. \"\"\" from",
"0: rmse[0] = 1e-8 rmse = rmse.reshape(rmse.shape[0], 1) print(f'Finish {rmse.shape[0]} run') return rmse",
"obs. date_range = pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index = pd.to_datetime(observed_din.index) observed_din",
"as pd from veneer.pest_runtime import * from veneer.manage import start,kill_all_now import pyapprox as",
"_, _, _ = modeling_settings() processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name) vs_list",
"given is not supported.' if temp_scale == 'monthly': sum_126001A = df.resample('M').sum() else: month_126001A",
"end_date, parameter_df, retrieve_time) # obtain the sum at a given temporal scale #",
"pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth from pyapprox.variable_transformations import AffineRandomVariableTransformation from funcs.read_data import variables_prep, file_settings from",
"pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for i in range(vars.shape[1]): parameter_df.iloc[i] = vars[:, i] # set the",
"pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) # Define criteria max_level = 6 err_tol = 1e-8",
"pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index = pd.to_datetime(observed_din.index) observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda",
"pd.DataFrame(din_126001A,dtype='float').values obs_din = pd.DataFrame(obs_din,dtype='float').values # breakpoint() resid = din_126001A - obs_din rmse =",
"Create the copy of models and veneer list project_name = 'MW_BASE_RC10.rsproj' veneer_name =",
"err_tol = 1e-8 max_num_samples = 100 max_level_1d = [max_level]*(pce.num_vars) admissibility_function = partial( max_level_admissibility_function,",
"= 100 max_level_1d = [max_level]*(pce.num_vars) admissibility_function = partial( max_level_admissibility_function, max_level, max_level_1d, max_num_samples, err_tol)",
"= pd.DataFrame(din_126001A,dtype='float').values obs_din = pd.DataFrame(obs_din,dtype='float').values # breakpoint() resid = din_126001A - obs_din rmse",
"monthly loads def timeseries_sum(df, temp_scale = 'annual'): \"\"\" Obtain the sum of timeseries",
"import start,kill_all_now import pyapprox as pya from functools import partial from pyapprox.adaptive_sparse_grid import",
"variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions( refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth ) # Generate emulator pce.build() #",
"pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year), columns=df.columns) for i in range(sum_126001A.shape[0]): sum_126001A.iloc[i, :] = month_126001A.iloc[i*12:",
"datapath = file_settings()[1] para_info = pd.read_csv(datapath + 'Parameters-PCE.csv') # define the variables for",
"Define objective functions # Use annual or monthly loads def timeseries_sum(df, temp_scale =",
"= pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') # Define objective functions # Use annual or monthly loads",
"n_candidate_samples)) pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) # Define criteria max_level = 6 err_tol =",
"import AffineRandomVariableTransformation from funcs.read_data import variables_prep, file_settings from funcs.modeling_funcs import vs_settings, \\ modeling_settings,",
"= variables_prep(param_file, product_uniform='uniform', dummy=False) var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True) # Create PyApprox model n_candidate_samples",
"= generate_observation_ensemble(vs_list, criteria, start_date, end_date, parameter_df, retrieve_time) # obtain the sum at a",
"clenshaw_curtis_rule_growth from pyapprox.variable_transformations import AffineRandomVariableTransformation from funcs.read_data import variables_prep, file_settings from funcs.modeling_funcs import",
"sum at a given temporal scale # din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A =",
"the modeling period and the recording variables _, _, criteria, start_date, end_date =",
"ports = paralell_vs(first_port, num_copies, project_name, veneer_name) vs_list = vs_settings(ports, things_to_record) # obtain the",
"= pd.DataFrame(obs_din,dtype='float').values # breakpoint() resid = din_126001A - obs_din rmse = (np.mean(resid **",
"else: month_126001A = df.resample('M').sum() sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year), columns=df.columns) for i",
"# Generate emulator pce.build() # store PCE import pickle pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\")) #",
"def run_source_lsq(vars, vs_list=vs_list): \"\"\" Script used to run_source and return the output file.",
"variables for PCE param_file = file_settings()[-1] ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False) var_trans",
"things_to_record) # obtain the initial values of parameters initial_values = obtain_initials(vs_list[0]) def run_source_lsq(vars,",
"= 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies = 8 _, things_to_record, _, _, _ = modeling_settings()",
"sum_126001A = df.resample('M').sum() else: month_126001A = df.resample('M').sum() sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year),",
"= observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x) # loop over the vars and",
"# Define criteria max_level = 6 err_tol = 1e-8 max_num_samples = 100 max_level_1d",
"observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x) # loop over the vars",
"initial values of parameters initial_values = obtain_initials(vs_list[0]) def run_source_lsq(vars, vs_list=vs_list): \"\"\" Script used",
"= pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) # Define criteria max_level = 6 err_tol = 1e-8 max_num_samples",
"2, axis=0)) ** 0.5 if rmse[0] == 0: rmse[0] = 1e-8 rmse =",
"end_date = modeling_settings() din = generate_observation_ensemble(vs_list, criteria, start_date, end_date, parameter_df, retrieve_time) # obtain",
"over the vars and try to use parallel parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for",
"# End timeseries_sum() # import observation if the output.txt requires the use of",
"import modeling_settings, generate_observation_ensemble import spotpy as sp print('Read Parameters') parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index')",
"# store PCE import pickle pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\")) # set the parameter values",
"monthly using 'M' \"\"\" assert temp_scale in ['monthly', 'annual'], 'The temporal scale given",
"the output file. The function is called by AdaptiveLejaPCE. \"\"\" from funcs.modeling_funcs import",
"# set the time period of the results retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] #",
"= 1e-8 max_num_samples = 100 max_level_1d = [max_level]*(pce.num_vars) admissibility_function = partial( max_level_admissibility_function, max_level,",
"columns=parameters.Name_short) for i in range(vars.shape[1]): parameter_df.iloc[i] = vars[:, i] # set the time",
"modeling period and the recording variables _, _, criteria, start_date, end_date = modeling_settings()",
"criteria max_level = 6 err_tol = 1e-8 max_num_samples = 100 max_level_1d = [max_level]*(pce.num_vars)",
"of obs. date_range = pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index = pd.to_datetime(observed_din.index)",
"rmse = (np.mean(resid ** 2, axis=0)) ** 0.5 if rmse[0] == 0: rmse[0]",
"use parallel parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for i in range(vars.shape[1]): parameter_df.iloc[i] = vars[:,",
"= rmse.reshape(rmse.shape[0], 1) print(f'Finish {rmse.shape[0]} run') return rmse # END run_source_lsq() # read",
"'monthly': sum_126001A = df.resample('M').sum() else: month_126001A = df.resample('M').sum() sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year,",
"= vars[:, i] # set the time period of the results retrieve_time =",
"AffineRandomVariableTransformation(variable, enforce_bounds=True) # Create PyApprox model n_candidate_samples = 10000 candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples))",
"# breakpoint() resid = din_126001A - obs_din rmse = (np.mean(resid ** 2, axis=0))",
"# read parameter distributions datapath = file_settings()[1] para_info = pd.read_csv(datapath + 'Parameters-PCE.csv') #",
"= [max_level]*(pce.num_vars) admissibility_function = partial( max_level_admissibility_function, max_level, max_level_1d, max_num_samples, err_tol) refinement_indicator = variance_pce_refinement_indicator",
"** 0.5 if rmse[0] == 0: rmse[0] = 1e-8 rmse = rmse.reshape(rmse.shape[0], 1)",
"def timeseries_sum(df, temp_scale = 'annual'): \"\"\" Obtain the sum of timeseries of different",
"parameter values to initial values for vs in vs_list: vs = change_param_values(vs, initial_values,",
"parameters initial_values = obtain_initials(vs_list[0]) def run_source_lsq(vars, vs_list=vs_list): \"\"\" Script used to run_source and",
"# Use annual or monthly loads def timeseries_sum(df, temp_scale = 'annual'): \"\"\" Obtain",
"'annual'): \"\"\" Obtain the sum of timeseries of different temporal scale. temp_scale: str,",
"run') return rmse # END run_source_lsq() # read parameter distributions datapath = file_settings()[1]",
"obtain the sum at a given temporal scale # din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]])",
"the variables for PCE param_file = file_settings()[-1] ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False)",
"criteria, start_date, end_date = modeling_settings() din = generate_observation_ensemble(vs_list, criteria, start_date, end_date, parameter_df, retrieve_time)",
"models and veneer list project_name = 'MW_BASE_RC10.rsproj' veneer_name = 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies =",
"= file_settings()[1] para_info = pd.read_csv(datapath + 'Parameters-PCE.csv') # define the variables for PCE",
"x) # loop over the vars and try to use parallel parameter_df =",
"= timeseries_sum(din, temp_scale = 'annual') obs_din = timeseries_sum(observed_din, temp_scale = 'annual') din_126001A =",
"pd.read_csv(datapath + 'Parameters-PCE.csv') # define the variables for PCE param_file = file_settings()[-1] ind_vars,",
"to run_source and return the output file. The function is called by AdaptiveLejaPCE.",
"project_name = 'MW_BASE_RC10.rsproj' veneer_name = 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies = 8 _, things_to_record, _,",
"values to initial values for vs in vs_list: vs = change_param_values(vs, initial_values, fromList=True)",
"index_col='Index') # Define objective functions # Use annual or monthly loads def timeseries_sum(df,",
"parameter distributions datapath = file_settings()[1] para_info = pd.read_csv(datapath + 'Parameters-PCE.csv') # define the",
"variable = variables_prep(param_file, product_uniform='uniform', dummy=False) var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True) # Create PyApprox model",
"pandas as pd from veneer.pest_runtime import * from veneer.manage import start,kill_all_now import pyapprox",
"try to use parallel parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for i in range(vars.shape[1]): parameter_df.iloc[i]",
"observed_din.index = pd.to_datetime(observed_din.index) observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x) # loop",
"= obtain_initials(vs_list[0]) def run_source_lsq(vars, vs_list=vs_list): \"\"\" Script used to run_source and return the",
"veneer_name = 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies = 8 _, things_to_record, _, _, _ =",
"= modeling_settings() din = generate_observation_ensemble(vs_list, criteria, start_date, end_date, parameter_df, retrieve_time) # obtain the",
"refinement_indicator = variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions( refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth ) # Generate emulator",
"loop over the vars and try to use parallel parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short)",
"in range(vars.shape[1]): parameter_df.iloc[i] = vars[:, i] # set the time period of the",
"things_to_record, _, _, _ = modeling_settings() processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name)",
"pd.to_datetime(observed_din.index) observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x) # loop over the",
"\"\"\" assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is not supported.'",
"ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False) var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True) # Create PyApprox",
"sum of timeseries of different temporal scale. temp_scale: str, default is 'Y', monthly",
"from funcs.read_data import variables_prep, file_settings from funcs.modeling_funcs import vs_settings, \\ modeling_settings, paralell_vs, obtain_initials,",
"pya from functools import partial from pyapprox.adaptive_sparse_grid import max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator",
"pce.set_refinement_functions( refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth ) # Generate emulator pce.build() # store PCE import",
"different temporal scale. temp_scale: str, default is 'Y', monthly using 'M' \"\"\" assert",
"** 2, axis=0)) ** 0.5 if rmse[0] == 0: rmse[0] = 1e-8 rmse",
"as np import pandas as pd from veneer.pest_runtime import * from veneer.manage import",
"= modeling_settings() processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name) vs_list = vs_settings(ports, things_to_record)",
"np import pandas as pd from veneer.pest_runtime import * from veneer.manage import start,kill_all_now",
"define the variables for PCE param_file = file_settings()[-1] ind_vars, variable = variables_prep(param_file, product_uniform='uniform',",
"= paralell_vs(first_port, num_copies, project_name, veneer_name) vs_list = vs_settings(ports, things_to_record) # obtain the initial",
"in range(sum_126001A.shape[0]): sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum() return sum_126001A # End timeseries_sum()",
"loads def timeseries_sum(df, temp_scale = 'annual'): \"\"\" Obtain the sum of timeseries of",
"0.5 if rmse[0] == 0: rmse[0] = 1e-8 rmse = rmse.reshape(rmse.shape[0], 1) print(f'Finish",
"din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A = timeseries_sum(din, temp_scale = 'annual') obs_din = timeseries_sum(observed_din,",
"for i in range(sum_126001A.shape[0]): sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum() return sum_126001A #",
"Obtain the sum of timeseries of different temporal scale. temp_scale: str, default is",
"import * from veneer.manage import start,kill_all_now import pyapprox as pya from functools import",
"is called by AdaptiveLejaPCE. \"\"\" from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble import spotpy as",
"file. The function is called by AdaptiveLejaPCE. \"\"\" from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble",
"vs_settings, \\ modeling_settings, paralell_vs, obtain_initials, change_param_values # Create the copy of models and",
"functools import partial from pyapprox.adaptive_sparse_grid import max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator from pyapprox.univariate_quadrature",
"max_level, max_level_1d, max_num_samples, err_tol) refinement_indicator = variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions( refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth",
"funcs.modeling_funcs import vs_settings, \\ modeling_settings, paralell_vs, obtain_initials, change_param_values # Create the copy of",
"from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth from pyapprox.variable_transformations import AffineRandomVariableTransformation from",
"'annual') obs_din = timeseries_sum(observed_din, temp_scale = 'annual') din_126001A = pd.DataFrame(din_126001A,dtype='float').values obs_din = pd.DataFrame(obs_din,dtype='float').values",
"partial( max_level_admissibility_function, max_level, max_level_1d, max_num_samples, err_tol) refinement_indicator = variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions( refinement_indicator,",
"AdaptiveLejaPCE. \"\"\" from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble import spotpy as sp print('Read Parameters')",
"retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] # define the modeling period and the recording variables",
"used to run_source and return the output file. The function is called by",
"The function is called by AdaptiveLejaPCE. \"\"\" from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble import",
"(np.mean(resid ** 2, axis=0)) ** 0.5 if rmse[0] == 0: rmse[0] = 1e-8",
"copy of models and veneer list project_name = 'MW_BASE_RC10.rsproj' veneer_name = 'vcmd45\\\\FlowMatters.Source.VeneerCmd.exe' first_port=15000;",
"max_num_samples = 100 max_level_1d = [max_level]*(pce.num_vars) admissibility_function = partial( max_level_admissibility_function, max_level, max_level_1d, max_num_samples,",
"sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year), columns=df.columns) for i in range(sum_126001A.shape[0]): sum_126001A.iloc[i, :]",
"8 _, things_to_record, _, _, _ = modeling_settings() processes, ports = paralell_vs(first_port, num_copies,",
"obtain_initials(vs_list[0]) def run_source_lsq(vars, vs_list=vs_list): \"\"\" Script used to run_source and return the output",
"temporal scale # din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A = timeseries_sum(din, temp_scale = 'annual')",
"import pickle pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\")) # set the parameter values to initial values",
"num_copies = 8 _, things_to_record, _, _, _ = modeling_settings() processes, ports =",
"pickle.dump(pce, open(f'{file_settings()[0]}\\pce-rmse.pkl', \"wb\")) # set the parameter values to initial values for vs",
"rmse.reshape(rmse.shape[0], 1) print(f'Finish {rmse.shape[0]} run') return rmse # END run_source_lsq() # read parameter",
"parameter_df.iloc[i] = vars[:, i] # set the time period of the results retrieve_time",
"scale. temp_scale: str, default is 'Y', monthly using 'M' \"\"\" assert temp_scale in",
"from veneer.manage import start,kill_all_now import pyapprox as pya from functools import partial from",
"= pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index = pd.to_datetime(observed_din.index) observed_din = observed_din.loc[date_range[0]:date_range[1],",
"distributions datapath = file_settings()[1] para_info = pd.read_csv(datapath + 'Parameters-PCE.csv') # define the variables",
"veneer.pest_runtime import * from veneer.manage import start,kill_all_now import pyapprox as pya from functools",
"using 'M' \"\"\" assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is",
"modeling_settings() din = generate_observation_ensemble(vs_list, criteria, start_date, end_date, parameter_df, retrieve_time) # obtain the sum",
"import spotpy as sp print('Read Parameters') parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') # Define objective",
"pyapprox.variable_transformations import AffineRandomVariableTransformation from funcs.read_data import variables_prep, file_settings from funcs.modeling_funcs import vs_settings, \\",
"import clenshaw_curtis_rule_growth from pyapprox.variable_transformations import AffineRandomVariableTransformation from funcs.read_data import variables_prep, file_settings from funcs.modeling_funcs",
"n_candidate_samples = 10000 candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) # Define",
"obtain the initial values of parameters initial_values = obtain_initials(vs_list[0]) def run_source_lsq(vars, vs_list=vs_list): \"\"\"",
"rmse[0] = 1e-8 rmse = rmse.reshape(rmse.shape[0], 1) print(f'Finish {rmse.shape[0]} run') return rmse #",
"# import observation if the output.txt requires the use of obs. date_range =",
"= pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index = pd.to_datetime(observed_din.index) observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 *",
"max_level_1d, max_num_samples, err_tol) refinement_indicator = variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions( refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth )",
"range(sum_126001A.shape[0]): sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum() return sum_126001A # End timeseries_sum() #"
] |
[
"= [] self._set_fields() self.__prune_fields() def _set_fields(self): pass def __prune_fields(self): columns = inspect(self.__class__).columns if",
"'status' not in kwargs.keys(): kwargs['status'] = 1 return super(Query, self).filter_by(**kwargs) db = SQLAlchemy(query_class",
"from app.libs.error_code import NotFound class SQLAlchemy(_SQLAlchemy): @contextmanager def auto_commit(self): try: yield self.session.commit() except",
"_set_fields(self): pass def __prune_fields(self): columns = inspect(self.__class__).columns if not self._fields: all_columns = set(columns.keys())",
"init_on_load(self): self._fields = [] # self._include = [] self._exclude = [] self._set_fields() self.__prune_fields()",
"def auto_commit(self): try: yield self.session.commit() except Exception as e: db.session.rollback() raise e class",
"inspect, Column, Integer, SmallInteger, orm from contextlib import contextmanager from app.libs.error_code import NotFound",
"import datetime from flask_sqlalchemy import SQLAlchemy as _SQLAlchemy, BaseQuery from sqlalchemy import inspect,",
"Query(BaseQuery): def filter_query(self, **kwargs): if 'status' not in kwargs.keys(): kwargs['status'] = 1 return",
"not self._fields: all_columns = set(columns.keys()) self._fields = list(all_columns - set(self._exclude)) def hide(self, *args):",
"columns = inspect(self.__class__).columns if not self._fields: all_columns = set(columns.keys()) self._fields = list(all_columns -",
"not in kwargs.keys(): kwargs['status'] = 1 return super(Query, self).filter_by(**kwargs) db = SQLAlchemy(query_class =",
"kwargs['status'] = 1 return super(Query, self).filter_by(**kwargs) db = SQLAlchemy(query_class = Query) class Base(db.Model):",
"*args): for key in args: self._fields.remove(key) return self def keys(self): return self._fields def",
"list(all_columns - set(self._exclude)) def hide(self, *args): for key in args: self._fields.remove(key) return self",
"contextmanager from app.libs.error_code import NotFound class SQLAlchemy(_SQLAlchemy): @contextmanager def auto_commit(self): try: yield self.session.commit()",
"contextlib import contextmanager from app.libs.error_code import NotFound class SQLAlchemy(_SQLAlchemy): @contextmanager def auto_commit(self): try:",
"= Query) class Base(db.Model): __abstract__ = True def set_attrs(self, attrs_dict): for key, value",
"all_columns = set(columns.keys()) self._fields = list(all_columns - set(self._exclude)) def hide(self, *args): for key",
"import NotFound class SQLAlchemy(_SQLAlchemy): @contextmanager def auto_commit(self): try: yield self.session.commit() except Exception as",
"from flask_sqlalchemy import SQLAlchemy as _SQLAlchemy, BaseQuery from sqlalchemy import inspect, Column, Integer,",
"in attrs_dict.items(): if hasattr(self, key) and key != 'id': setattr(self, key, value) def",
"[] # self._include = [] self._exclude = [] self._set_fields() self.__prune_fields() def _set_fields(self): pass",
"class Query(BaseQuery): def filter_query(self, **kwargs): if 'status' not in kwargs.keys(): kwargs['status'] = 1",
"setattr(self, key, value) def keys(self): return self.fields class MixinJSONSerializer: @orm.reconstructor def init_on_load(self): self._fields",
"= SQLAlchemy(query_class = Query) class Base(db.Model): __abstract__ = True def set_attrs(self, attrs_dict): for",
"db.session.rollback() raise e class Query(BaseQuery): def filter_query(self, **kwargs): if 'status' not in kwargs.keys():",
"attrs_dict): for key, value in attrs_dict.items(): if hasattr(self, key) and key != 'id':",
"self._include = [] self._exclude = [] self._set_fields() self.__prune_fields() def _set_fields(self): pass def __prune_fields(self):",
"self.session.commit() except Exception as e: db.session.rollback() raise e class Query(BaseQuery): def filter_query(self, **kwargs):",
"def hide(self, *args): for key in args: self._fields.remove(key) return self def keys(self): return",
"from datetime import datetime from flask_sqlalchemy import SQLAlchemy as _SQLAlchemy, BaseQuery from sqlalchemy",
"return self.fields class MixinJSONSerializer: @orm.reconstructor def init_on_load(self): self._fields = [] # self._include =",
"datetime from flask_sqlalchemy import SQLAlchemy as _SQLAlchemy, BaseQuery from sqlalchemy import inspect, Column,",
"for key, value in attrs_dict.items(): if hasattr(self, key) and key != 'id': setattr(self,",
"set(columns.keys()) self._fields = list(all_columns - set(self._exclude)) def hide(self, *args): for key in args:",
"as e: db.session.rollback() raise e class Query(BaseQuery): def filter_query(self, **kwargs): if 'status' not",
"key in args: self._fields.remove(key) return self def keys(self): return self._fields def __getitem__(self, key):",
"in args: self._fields.remove(key) return self def keys(self): return self._fields def __getitem__(self, key): return",
"class MixinJSONSerializer: @orm.reconstructor def init_on_load(self): self._fields = [] # self._include = [] self._exclude",
"_SQLAlchemy, BaseQuery from sqlalchemy import inspect, Column, Integer, SmallInteger, orm from contextlib import",
"self.__prune_fields() def _set_fields(self): pass def __prune_fields(self): columns = inspect(self.__class__).columns if not self._fields: all_columns",
"super(Query, self).filter_by(**kwargs) db = SQLAlchemy(query_class = Query) class Base(db.Model): __abstract__ = True def",
"- set(self._exclude)) def hide(self, *args): for key in args: self._fields.remove(key) return self def",
"key, value) def keys(self): return self.fields class MixinJSONSerializer: @orm.reconstructor def init_on_load(self): self._fields =",
"NotFound class SQLAlchemy(_SQLAlchemy): @contextmanager def auto_commit(self): try: yield self.session.commit() except Exception as e:",
"BaseQuery from sqlalchemy import inspect, Column, Integer, SmallInteger, orm from contextlib import contextmanager",
"if 'status' not in kwargs.keys(): kwargs['status'] = 1 return super(Query, self).filter_by(**kwargs) db =",
"as _SQLAlchemy, BaseQuery from sqlalchemy import inspect, Column, Integer, SmallInteger, orm from contextlib",
"hasattr(self, key) and key != 'id': setattr(self, key, value) def keys(self): return self.fields",
"self.fields class MixinJSONSerializer: @orm.reconstructor def init_on_load(self): self._fields = [] # self._include = []",
"@orm.reconstructor def init_on_load(self): self._fields = [] # self._include = [] self._exclude = []",
"[] self._exclude = [] self._set_fields() self.__prune_fields() def _set_fields(self): pass def __prune_fields(self): columns =",
"app.libs.error_code import NotFound class SQLAlchemy(_SQLAlchemy): @contextmanager def auto_commit(self): try: yield self.session.commit() except Exception",
"SQLAlchemy(_SQLAlchemy): @contextmanager def auto_commit(self): try: yield self.session.commit() except Exception as e: db.session.rollback() raise",
"__abstract__ = True def set_attrs(self, attrs_dict): for key, value in attrs_dict.items(): if hasattr(self,",
"= True def set_attrs(self, attrs_dict): for key, value in attrs_dict.items(): if hasattr(self, key)",
"= [] self._exclude = [] self._set_fields() self.__prune_fields() def _set_fields(self): pass def __prune_fields(self): columns",
"class SQLAlchemy(_SQLAlchemy): @contextmanager def auto_commit(self): try: yield self.session.commit() except Exception as e: db.session.rollback()",
"import contextmanager from app.libs.error_code import NotFound class SQLAlchemy(_SQLAlchemy): @contextmanager def auto_commit(self): try: yield",
"inspect(self.__class__).columns if not self._fields: all_columns = set(columns.keys()) self._fields = list(all_columns - set(self._exclude)) def",
"set(self._exclude)) def hide(self, *args): for key in args: self._fields.remove(key) return self def keys(self):",
"db = SQLAlchemy(query_class = Query) class Base(db.Model): __abstract__ = True def set_attrs(self, attrs_dict):",
"keys(self): return self.fields class MixinJSONSerializer: @orm.reconstructor def init_on_load(self): self._fields = [] # self._include",
"@contextmanager def auto_commit(self): try: yield self.session.commit() except Exception as e: db.session.rollback() raise e",
"self._fields: all_columns = set(columns.keys()) self._fields = list(all_columns - set(self._exclude)) def hide(self, *args): for",
"'id': setattr(self, key, value) def keys(self): return self.fields class MixinJSONSerializer: @orm.reconstructor def init_on_load(self):",
"import inspect, Column, Integer, SmallInteger, orm from contextlib import contextmanager from app.libs.error_code import",
"1 return super(Query, self).filter_by(**kwargs) db = SQLAlchemy(query_class = Query) class Base(db.Model): __abstract__ =",
"import SQLAlchemy as _SQLAlchemy, BaseQuery from sqlalchemy import inspect, Column, Integer, SmallInteger, orm",
"e class Query(BaseQuery): def filter_query(self, **kwargs): if 'status' not in kwargs.keys(): kwargs['status'] =",
"key != 'id': setattr(self, key, value) def keys(self): return self.fields class MixinJSONSerializer: @orm.reconstructor",
"kwargs.keys(): kwargs['status'] = 1 return super(Query, self).filter_by(**kwargs) db = SQLAlchemy(query_class = Query) class",
"key, value in attrs_dict.items(): if hasattr(self, key) and key != 'id': setattr(self, key,",
"orm from contextlib import contextmanager from app.libs.error_code import NotFound class SQLAlchemy(_SQLAlchemy): @contextmanager def",
"except Exception as e: db.session.rollback() raise e class Query(BaseQuery): def filter_query(self, **kwargs): if",
"SQLAlchemy as _SQLAlchemy, BaseQuery from sqlalchemy import inspect, Column, Integer, SmallInteger, orm from",
"[] self._set_fields() self.__prune_fields() def _set_fields(self): pass def __prune_fields(self): columns = inspect(self.__class__).columns if not",
"args: self._fields.remove(key) return self def keys(self): return self._fields def __getitem__(self, key): return getattr(self,",
"# self._include = [] self._exclude = [] self._set_fields() self.__prune_fields() def _set_fields(self): pass def",
"auto_commit(self): try: yield self.session.commit() except Exception as e: db.session.rollback() raise e class Query(BaseQuery):",
"from contextlib import contextmanager from app.libs.error_code import NotFound class SQLAlchemy(_SQLAlchemy): @contextmanager def auto_commit(self):",
"True def set_attrs(self, attrs_dict): for key, value in attrs_dict.items(): if hasattr(self, key) and",
"= [] # self._include = [] self._exclude = [] self._set_fields() self.__prune_fields() def _set_fields(self):",
"class Base(db.Model): __abstract__ = True def set_attrs(self, attrs_dict): for key, value in attrs_dict.items():",
"if not self._fields: all_columns = set(columns.keys()) self._fields = list(all_columns - set(self._exclude)) def hide(self,",
"return super(Query, self).filter_by(**kwargs) db = SQLAlchemy(query_class = Query) class Base(db.Model): __abstract__ = True",
"pass def __prune_fields(self): columns = inspect(self.__class__).columns if not self._fields: all_columns = set(columns.keys()) self._fields",
"Integer, SmallInteger, orm from contextlib import contextmanager from app.libs.error_code import NotFound class SQLAlchemy(_SQLAlchemy):",
"value in attrs_dict.items(): if hasattr(self, key) and key != 'id': setattr(self, key, value)",
"SQLAlchemy(query_class = Query) class Base(db.Model): __abstract__ = True def set_attrs(self, attrs_dict): for key,",
"filter_query(self, **kwargs): if 'status' not in kwargs.keys(): kwargs['status'] = 1 return super(Query, self).filter_by(**kwargs)",
"if hasattr(self, key) and key != 'id': setattr(self, key, value) def keys(self): return",
"attrs_dict.items(): if hasattr(self, key) and key != 'id': setattr(self, key, value) def keys(self):",
"SmallInteger, orm from contextlib import contextmanager from app.libs.error_code import NotFound class SQLAlchemy(_SQLAlchemy): @contextmanager",
"yield self.session.commit() except Exception as e: db.session.rollback() raise e class Query(BaseQuery): def filter_query(self,",
"self._fields.remove(key) return self def keys(self): return self._fields def __getitem__(self, key): return getattr(self, key)",
"def set_attrs(self, attrs_dict): for key, value in attrs_dict.items(): if hasattr(self, key) and key",
"in kwargs.keys(): kwargs['status'] = 1 return super(Query, self).filter_by(**kwargs) db = SQLAlchemy(query_class = Query)",
"flask_sqlalchemy import SQLAlchemy as _SQLAlchemy, BaseQuery from sqlalchemy import inspect, Column, Integer, SmallInteger,",
"self._fields = list(all_columns - set(self._exclude)) def hide(self, *args): for key in args: self._fields.remove(key)",
"raise e class Query(BaseQuery): def filter_query(self, **kwargs): if 'status' not in kwargs.keys(): kwargs['status']",
"hide(self, *args): for key in args: self._fields.remove(key) return self def keys(self): return self._fields",
"for key in args: self._fields.remove(key) return self def keys(self): return self._fields def __getitem__(self,",
"datetime import datetime from flask_sqlalchemy import SQLAlchemy as _SQLAlchemy, BaseQuery from sqlalchemy import",
"def filter_query(self, **kwargs): if 'status' not in kwargs.keys(): kwargs['status'] = 1 return super(Query,",
"set_attrs(self, attrs_dict): for key, value in attrs_dict.items(): if hasattr(self, key) and key !=",
"and key != 'id': setattr(self, key, value) def keys(self): return self.fields class MixinJSONSerializer:",
"e: db.session.rollback() raise e class Query(BaseQuery): def filter_query(self, **kwargs): if 'status' not in",
"**kwargs): if 'status' not in kwargs.keys(): kwargs['status'] = 1 return super(Query, self).filter_by(**kwargs) db",
"from sqlalchemy import inspect, Column, Integer, SmallInteger, orm from contextlib import contextmanager from",
"Column, Integer, SmallInteger, orm from contextlib import contextmanager from app.libs.error_code import NotFound class",
"MixinJSONSerializer: @orm.reconstructor def init_on_load(self): self._fields = [] # self._include = [] self._exclude =",
"sqlalchemy import inspect, Column, Integer, SmallInteger, orm from contextlib import contextmanager from app.libs.error_code",
"Query) class Base(db.Model): __abstract__ = True def set_attrs(self, attrs_dict): for key, value in",
"def __prune_fields(self): columns = inspect(self.__class__).columns if not self._fields: all_columns = set(columns.keys()) self._fields =",
"= set(columns.keys()) self._fields = list(all_columns - set(self._exclude)) def hide(self, *args): for key in",
"self._exclude = [] self._set_fields() self.__prune_fields() def _set_fields(self): pass def __prune_fields(self): columns = inspect(self.__class__).columns",
"__prune_fields(self): columns = inspect(self.__class__).columns if not self._fields: all_columns = set(columns.keys()) self._fields = list(all_columns",
"= inspect(self.__class__).columns if not self._fields: all_columns = set(columns.keys()) self._fields = list(all_columns - set(self._exclude))",
"= 1 return super(Query, self).filter_by(**kwargs) db = SQLAlchemy(query_class = Query) class Base(db.Model): __abstract__",
"Exception as e: db.session.rollback() raise e class Query(BaseQuery): def filter_query(self, **kwargs): if 'status'",
"self).filter_by(**kwargs) db = SQLAlchemy(query_class = Query) class Base(db.Model): __abstract__ = True def set_attrs(self,",
"def _set_fields(self): pass def __prune_fields(self): columns = inspect(self.__class__).columns if not self._fields: all_columns =",
"key) and key != 'id': setattr(self, key, value) def keys(self): return self.fields class",
"def init_on_load(self): self._fields = [] # self._include = [] self._exclude = [] self._set_fields()",
"Base(db.Model): __abstract__ = True def set_attrs(self, attrs_dict): for key, value in attrs_dict.items(): if",
"self._fields = [] # self._include = [] self._exclude = [] self._set_fields() self.__prune_fields() def",
"try: yield self.session.commit() except Exception as e: db.session.rollback() raise e class Query(BaseQuery): def",
"self._set_fields() self.__prune_fields() def _set_fields(self): pass def __prune_fields(self): columns = inspect(self.__class__).columns if not self._fields:",
"= list(all_columns - set(self._exclude)) def hide(self, *args): for key in args: self._fields.remove(key) return",
"!= 'id': setattr(self, key, value) def keys(self): return self.fields class MixinJSONSerializer: @orm.reconstructor def",
"value) def keys(self): return self.fields class MixinJSONSerializer: @orm.reconstructor def init_on_load(self): self._fields = []",
"def keys(self): return self.fields class MixinJSONSerializer: @orm.reconstructor def init_on_load(self): self._fields = [] #"
] |
[
"\"Member left\" else: name = u.name glo.SETNAME(k, name) await bot.change_presence(activity = discord.Activity(name =",
"= discord.ActivityType.watching)) # Simplistic help ud = glo.JSONREAD(\"userdata.json\") del ud[\"default\"] for k in",
"Removing default help (I don't like it) bot.add_cog(Cogs.Admin(bot)) # Many cog bot.add_cog(Cogs.Bank(bot)) bot.add_cog(Cogs.Counting(bot))",
"ud: k = int(k) u = bot.get_user(k) if u is None: name =",
"discord import Cogs #type: ignore import glo #type: ignore from discord.ext import commands",
"os.system(\"git pull\") os.system(\"pm2 restart Phase\") if message.author.bot: return # We don't like bots",
"Phase\") if message.author.bot: return # We don't like bots return await bot.process_commands(message) bot",
"== 796374619900084255: os.system(\"git pull\") os.system(\"pm2 restart Phase\") if message.author.bot: return # We don't",
"import glo #type: ignore from discord.ext import commands class PhaseBot(commands.Bot): \"\"\" The bot",
"= PhaseBot(command_prefix = glo.PREFIX, intents = discord.Intents.all()) # Writing the embed bot.remove_command('help') #",
"pull\") os.system(\"pm2 restart Phase\") if message.author.bot: return # We don't like bots return",
"ud[\"default\"] for k in ud: k = int(k) u = bot.get_user(k) if u",
"\"\"\" The bot \"\"\" async def on_ready(self): print(\"Discodo!\") # Great, it's working await",
"= glo.PREFIX, intents = discord.Intents.all()) # Writing the embed bot.remove_command('help') # Removing default",
"help (I don't like it) bot.add_cog(Cogs.Admin(bot)) # Many cog bot.add_cog(Cogs.Bank(bot)) bot.add_cog(Cogs.Counting(bot)) bot.add_cog(Cogs.General(bot)) bot.add_cog(Cogs.Listeners(bot))",
"# Removing default help (I don't like it) bot.add_cog(Cogs.Admin(bot)) # Many cog bot.add_cog(Cogs.Bank(bot))",
"glo.PREFIX, intents = discord.Intents.all()) # Writing the embed bot.remove_command('help') # Removing default help",
"embed bot.remove_command('help') # Removing default help (I don't like it) bot.add_cog(Cogs.Admin(bot)) # Many",
"return # We don't like bots return await bot.process_commands(message) bot = PhaseBot(command_prefix =",
"help async def on_message(self, message): if message.channel.id == 796374619900084255: os.system(\"git pull\") os.system(\"pm2 restart",
"discord.Activity(name = f\"my startup...\", type = discord.ActivityType.watching)) # Simplistic help ud = glo.JSONREAD(\"userdata.json\")",
"# We don't like bots return await bot.process_commands(message) bot = PhaseBot(command_prefix = glo.PREFIX,",
"await bot.change_presence(activity = discord.Activity(name = f\"my startup...\", type = discord.ActivityType.watching)) # Simplistic help",
"like it) bot.add_cog(Cogs.Admin(bot)) # Many cog bot.add_cog(Cogs.Bank(bot)) bot.add_cog(Cogs.Counting(bot)) bot.add_cog(Cogs.General(bot)) bot.add_cog(Cogs.Listeners(bot)) bot.add_cog(Cogs.Starboard(bot)) bot.add_cog(Cogs.Tasks(bot)) bot.run(glo.GLOBAL_READ(\"token\"))",
"bot.remove_command('help') # Removing default help (I don't like it) bot.add_cog(Cogs.Admin(bot)) # Many cog",
"ignore from discord.ext import commands class PhaseBot(commands.Bot): \"\"\" The bot \"\"\" async def",
"bot.change_presence(activity = discord.Activity(name = f\"le noir | v{glo.VERSION}\", type = discord.ActivityType.watching)) # Simplistic",
"await bot.process_commands(message) bot = PhaseBot(command_prefix = glo.PREFIX, intents = discord.Intents.all()) # Writing the",
"async def on_ready(self): print(\"Discodo!\") # Great, it's working await bot.change_presence(activity = discord.Activity(name =",
"type = discord.ActivityType.watching)) # Simplistic help async def on_message(self, message): if message.channel.id ==",
"Writing the embed bot.remove_command('help') # Removing default help (I don't like it) bot.add_cog(Cogs.Admin(bot))",
"don't like it) bot.add_cog(Cogs.Admin(bot)) # Many cog bot.add_cog(Cogs.Bank(bot)) bot.add_cog(Cogs.Counting(bot)) bot.add_cog(Cogs.General(bot)) bot.add_cog(Cogs.Listeners(bot)) bot.add_cog(Cogs.Starboard(bot)) bot.add_cog(Cogs.Tasks(bot))",
"PhaseBot(commands.Bot): \"\"\" The bot \"\"\" async def on_ready(self): print(\"Discodo!\") # Great, it's working",
"os import discord import Cogs #type: ignore import glo #type: ignore from discord.ext",
"discord.ActivityType.watching)) # Simplistic help async def on_message(self, message): if message.channel.id == 796374619900084255: os.system(\"git",
"PhaseBot(command_prefix = glo.PREFIX, intents = discord.Intents.all()) # Writing the embed bot.remove_command('help') # Removing",
"Simplistic help ud = glo.JSONREAD(\"userdata.json\") del ud[\"default\"] for k in ud: k =",
"name = \"Member left\" else: name = u.name glo.SETNAME(k, name) await bot.change_presence(activity =",
"help ud = glo.JSONREAD(\"userdata.json\") del ud[\"default\"] for k in ud: k = int(k)",
"left\" else: name = u.name glo.SETNAME(k, name) await bot.change_presence(activity = discord.Activity(name = f\"le",
"working await bot.change_presence(activity = discord.Activity(name = f\"my startup...\", type = discord.ActivityType.watching)) # Simplistic",
"Simplistic help async def on_message(self, message): if message.channel.id == 796374619900084255: os.system(\"git pull\") os.system(\"pm2",
"if message.channel.id == 796374619900084255: os.system(\"git pull\") os.system(\"pm2 restart Phase\") if message.author.bot: return #",
"= glo.JSONREAD(\"userdata.json\") del ud[\"default\"] for k in ud: k = int(k) u =",
"int(k) u = bot.get_user(k) if u is None: name = \"Member left\" else:",
"else: name = u.name glo.SETNAME(k, name) await bot.change_presence(activity = discord.Activity(name = f\"le noir",
"= \"Member left\" else: name = u.name glo.SETNAME(k, name) await bot.change_presence(activity = discord.Activity(name",
"for k in ud: k = int(k) u = bot.get_user(k) if u is",
"796374619900084255: os.system(\"git pull\") os.system(\"pm2 restart Phase\") if message.author.bot: return # We don't like",
"#type: ignore from discord.ext import commands class PhaseBot(commands.Bot): \"\"\" The bot \"\"\" async",
"\"\"\" async def on_ready(self): print(\"Discodo!\") # Great, it's working await bot.change_presence(activity = discord.Activity(name",
"await bot.change_presence(activity = discord.Activity(name = f\"le noir | v{glo.VERSION}\", type = discord.ActivityType.watching)) #",
"import os import discord import Cogs #type: ignore import glo #type: ignore from",
"= discord.Activity(name = f\"my startup...\", type = discord.ActivityType.watching)) # Simplistic help ud =",
"None: name = \"Member left\" else: name = u.name glo.SETNAME(k, name) await bot.change_presence(activity",
"discord.Intents.all()) # Writing the embed bot.remove_command('help') # Removing default help (I don't like",
"restart Phase\") if message.author.bot: return # We don't like bots return await bot.process_commands(message)",
"k = int(k) u = bot.get_user(k) if u is None: name = \"Member",
"default help (I don't like it) bot.add_cog(Cogs.Admin(bot)) # Many cog bot.add_cog(Cogs.Bank(bot)) bot.add_cog(Cogs.Counting(bot)) bot.add_cog(Cogs.General(bot))",
"def on_ready(self): print(\"Discodo!\") # Great, it's working await bot.change_presence(activity = discord.Activity(name = f\"my",
"on_message(self, message): if message.channel.id == 796374619900084255: os.system(\"git pull\") os.system(\"pm2 restart Phase\") if message.author.bot:",
"intents = discord.Intents.all()) # Writing the embed bot.remove_command('help') # Removing default help (I",
"v{glo.VERSION}\", type = discord.ActivityType.watching)) # Simplistic help async def on_message(self, message): if message.channel.id",
"# Writing the embed bot.remove_command('help') # Removing default help (I don't like it)",
"print(\"Discodo!\") # Great, it's working await bot.change_presence(activity = discord.Activity(name = f\"my startup...\", type",
"= int(k) u = bot.get_user(k) if u is None: name = \"Member left\"",
"discord.ext import commands class PhaseBot(commands.Bot): \"\"\" The bot \"\"\" async def on_ready(self): print(\"Discodo!\")",
"u = bot.get_user(k) if u is None: name = \"Member left\" else: name",
"message.channel.id == 796374619900084255: os.system(\"git pull\") os.system(\"pm2 restart Phase\") if message.author.bot: return # We",
"(I don't like it) bot.add_cog(Cogs.Admin(bot)) # Many cog bot.add_cog(Cogs.Bank(bot)) bot.add_cog(Cogs.Counting(bot)) bot.add_cog(Cogs.General(bot)) bot.add_cog(Cogs.Listeners(bot)) bot.add_cog(Cogs.Starboard(bot))",
"bots return await bot.process_commands(message) bot = PhaseBot(command_prefix = glo.PREFIX, intents = discord.Intents.all()) #",
"The bot \"\"\" async def on_ready(self): print(\"Discodo!\") # Great, it's working await bot.change_presence(activity",
"u is None: name = \"Member left\" else: name = u.name glo.SETNAME(k, name)",
"os.system(\"pm2 restart Phase\") if message.author.bot: return # We don't like bots return await",
"commands class PhaseBot(commands.Bot): \"\"\" The bot \"\"\" async def on_ready(self): print(\"Discodo!\") # Great,",
"like bots return await bot.process_commands(message) bot = PhaseBot(command_prefix = glo.PREFIX, intents = discord.Intents.all())",
"f\"my startup...\", type = discord.ActivityType.watching)) # Simplistic help ud = glo.JSONREAD(\"userdata.json\") del ud[\"default\"]",
"noir | v{glo.VERSION}\", type = discord.ActivityType.watching)) # Simplistic help async def on_message(self, message):",
"bot = PhaseBot(command_prefix = glo.PREFIX, intents = discord.Intents.all()) # Writing the embed bot.remove_command('help')",
"return await bot.process_commands(message) bot = PhaseBot(command_prefix = glo.PREFIX, intents = discord.Intents.all()) # Writing",
"= f\"le noir | v{glo.VERSION}\", type = discord.ActivityType.watching)) # Simplistic help async def",
"from discord.ext import commands class PhaseBot(commands.Bot): \"\"\" The bot \"\"\" async def on_ready(self):",
"Great, it's working await bot.change_presence(activity = discord.Activity(name = f\"my startup...\", type = discord.ActivityType.watching))",
"if u is None: name = \"Member left\" else: name = u.name glo.SETNAME(k,",
"= discord.Activity(name = f\"le noir | v{glo.VERSION}\", type = discord.ActivityType.watching)) # Simplistic help",
"| v{glo.VERSION}\", type = discord.ActivityType.watching)) # Simplistic help async def on_message(self, message): if",
"ud = glo.JSONREAD(\"userdata.json\") del ud[\"default\"] for k in ud: k = int(k) u",
"import commands class PhaseBot(commands.Bot): \"\"\" The bot \"\"\" async def on_ready(self): print(\"Discodo!\") #",
"if message.author.bot: return # We don't like bots return await bot.process_commands(message) bot =",
"message.author.bot: return # We don't like bots return await bot.process_commands(message) bot = PhaseBot(command_prefix",
"= discord.Intents.all()) # Writing the embed bot.remove_command('help') # Removing default help (I don't",
"async def on_message(self, message): if message.channel.id == 796374619900084255: os.system(\"git pull\") os.system(\"pm2 restart Phase\")",
"bot.change_presence(activity = discord.Activity(name = f\"my startup...\", type = discord.ActivityType.watching)) # Simplistic help ud",
"the embed bot.remove_command('help') # Removing default help (I don't like it) bot.add_cog(Cogs.Admin(bot)) #",
"on_ready(self): print(\"Discodo!\") # Great, it's working await bot.change_presence(activity = discord.Activity(name = f\"my startup...\",",
"= f\"my startup...\", type = discord.ActivityType.watching)) # Simplistic help ud = glo.JSONREAD(\"userdata.json\") del",
"f\"le noir | v{glo.VERSION}\", type = discord.ActivityType.watching)) # Simplistic help async def on_message(self,",
"def on_message(self, message): if message.channel.id == 796374619900084255: os.system(\"git pull\") os.system(\"pm2 restart Phase\") if",
"bot \"\"\" async def on_ready(self): print(\"Discodo!\") # Great, it's working await bot.change_presence(activity =",
"name = u.name glo.SETNAME(k, name) await bot.change_presence(activity = discord.Activity(name = f\"le noir |",
"= discord.ActivityType.watching)) # Simplistic help async def on_message(self, message): if message.channel.id == 796374619900084255:",
"Cogs #type: ignore import glo #type: ignore from discord.ext import commands class PhaseBot(commands.Bot):",
"glo.SETNAME(k, name) await bot.change_presence(activity = discord.Activity(name = f\"le noir | v{glo.VERSION}\", type =",
"= u.name glo.SETNAME(k, name) await bot.change_presence(activity = discord.Activity(name = f\"le noir | v{glo.VERSION}\",",
"k in ud: k = int(k) u = bot.get_user(k) if u is None:",
"is None: name = \"Member left\" else: name = u.name glo.SETNAME(k, name) await",
"don't like bots return await bot.process_commands(message) bot = PhaseBot(command_prefix = glo.PREFIX, intents =",
"in ud: k = int(k) u = bot.get_user(k) if u is None: name",
"discord.ActivityType.watching)) # Simplistic help ud = glo.JSONREAD(\"userdata.json\") del ud[\"default\"] for k in ud:",
"u.name glo.SETNAME(k, name) await bot.change_presence(activity = discord.Activity(name = f\"le noir | v{glo.VERSION}\", type",
"# Simplistic help async def on_message(self, message): if message.channel.id == 796374619900084255: os.system(\"git pull\")",
"bot.get_user(k) if u is None: name = \"Member left\" else: name = u.name",
"startup...\", type = discord.ActivityType.watching)) # Simplistic help ud = glo.JSONREAD(\"userdata.json\") del ud[\"default\"] for",
"class PhaseBot(commands.Bot): \"\"\" The bot \"\"\" async def on_ready(self): print(\"Discodo!\") # Great, it's",
"# Great, it's working await bot.change_presence(activity = discord.Activity(name = f\"my startup...\", type =",
"it's working await bot.change_presence(activity = discord.Activity(name = f\"my startup...\", type = discord.ActivityType.watching)) #",
"import Cogs #type: ignore import glo #type: ignore from discord.ext import commands class",
"# Simplistic help ud = glo.JSONREAD(\"userdata.json\") del ud[\"default\"] for k in ud: k",
"= bot.get_user(k) if u is None: name = \"Member left\" else: name =",
"bot.process_commands(message) bot = PhaseBot(command_prefix = glo.PREFIX, intents = discord.Intents.all()) # Writing the embed",
"We don't like bots return await bot.process_commands(message) bot = PhaseBot(command_prefix = glo.PREFIX, intents",
"import discord import Cogs #type: ignore import glo #type: ignore from discord.ext import",
"type = discord.ActivityType.watching)) # Simplistic help ud = glo.JSONREAD(\"userdata.json\") del ud[\"default\"] for k",
"message): if message.channel.id == 796374619900084255: os.system(\"git pull\") os.system(\"pm2 restart Phase\") if message.author.bot: return",
"name) await bot.change_presence(activity = discord.Activity(name = f\"le noir | v{glo.VERSION}\", type = discord.ActivityType.watching))",
"#type: ignore import glo #type: ignore from discord.ext import commands class PhaseBot(commands.Bot): \"\"\"",
"glo #type: ignore from discord.ext import commands class PhaseBot(commands.Bot): \"\"\" The bot \"\"\"",
"ignore import glo #type: ignore from discord.ext import commands class PhaseBot(commands.Bot): \"\"\" The",
"del ud[\"default\"] for k in ud: k = int(k) u = bot.get_user(k) if",
"discord.Activity(name = f\"le noir | v{glo.VERSION}\", type = discord.ActivityType.watching)) # Simplistic help async",
"glo.JSONREAD(\"userdata.json\") del ud[\"default\"] for k in ud: k = int(k) u = bot.get_user(k)"
] |
[
".VUPScorer import * from .NUPScorer import * from .MLMScorer import * from .distinct",
"name=\"models\" from .VUPScorer import * from .NUPScorer import * from .MLMScorer import *",
"* from .MLMScorer import * from .distinct import * from .composite import *",
"from .VUPScorer import * from .NUPScorer import * from .MLMScorer import * from",
"import * from .MLMScorer import * from .distinct import * from .composite import",
".NUPScorer import * from .MLMScorer import * from .distinct import * from .composite",
"import * from .NUPScorer import * from .MLMScorer import * from .distinct import",
"* from .NUPScorer import * from .MLMScorer import * from .distinct import *",
"from .NUPScorer import * from .MLMScorer import * from .distinct import * from"
] |
[
"setup from setuptools_rust import RustExtension from os import path if __name__ == \"__main__\":",
"import RustExtension from os import path if __name__ == \"__main__\": this_directory = path.abspath(path.dirname(__file__))",
"path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name=\"rastro\", version=\"0.0.0\",",
"# Do NOT edit. Will be updated for release by CI pipeline classifiers=[",
"Language :: Python\", \"Programming Language :: Rust\", ], packages=[\"rastro\"], rust_extensions=[ RustExtension(\"rastro_python.constants\"), RustExtension(\"rastro_python.orbits\") ],",
"Audience :: Developers\", \"Programming Language :: Python\", \"Programming Language :: Rust\", ], packages=[\"rastro\"],",
"NOT edit. Will be updated for release by CI pipeline classifiers=[ \"License ::",
":: Python\", \"Programming Language :: Rust\", ], packages=[\"rastro\"], rust_extensions=[ RustExtension(\"rastro_python.constants\"), RustExtension(\"rastro_python.orbits\") ], include_package_data=True,",
"path if __name__ == \"__main__\": this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8') as",
"'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name=\"rastro\", version=\"0.0.0\", # Do NOT",
"Do NOT edit. Will be updated for release by CI pipeline classifiers=[ \"License",
"by CI pipeline classifiers=[ \"License :: OSI Approved :: MIT License\", \"Intended Audience",
"OSI Approved :: MIT License\", \"Intended Audience :: Developers\", \"Programming Language :: Python\",",
"\"Intended Audience :: Developers\", \"Programming Language :: Python\", \"Programming Language :: Rust\", ],",
"setuptools_rust import RustExtension from os import path if __name__ == \"__main__\": this_directory =",
"__name__ == \"__main__\": this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description",
"f.read() setup( name=\"rastro\", version=\"0.0.0\", # Do NOT edit. Will be updated for release",
"\"Programming Language :: Python\", \"Programming Language :: Rust\", ], packages=[\"rastro\"], rust_extensions=[ RustExtension(\"rastro_python.constants\"), RustExtension(\"rastro_python.orbits\")",
"Python\", \"Programming Language :: Rust\", ], packages=[\"rastro\"], rust_extensions=[ RustExtension(\"rastro_python.constants\"), RustExtension(\"rastro_python.orbits\") ], include_package_data=True, zip_safe=False,",
"for release by CI pipeline classifiers=[ \"License :: OSI Approved :: MIT License\",",
"encoding='utf-8') as f: long_description = f.read() setup( name=\"rastro\", version=\"0.0.0\", # Do NOT edit.",
"this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup(",
"= path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name=\"rastro\",",
":: MIT License\", \"Intended Audience :: Developers\", \"Programming Language :: Python\", \"Programming Language",
"= f.read() setup( name=\"rastro\", version=\"0.0.0\", # Do NOT edit. Will be updated for",
"setup( name=\"rastro\", version=\"0.0.0\", # Do NOT edit. Will be updated for release by",
"python from setuptools import setup from setuptools_rust import RustExtension from os import path",
"from setuptools_rust import RustExtension from os import path if __name__ == \"__main__\": this_directory",
"RustExtension from os import path if __name__ == \"__main__\": this_directory = path.abspath(path.dirname(__file__)) with",
"import path if __name__ == \"__main__\": this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8')",
"name=\"rastro\", version=\"0.0.0\", # Do NOT edit. Will be updated for release by CI",
"CI pipeline classifiers=[ \"License :: OSI Approved :: MIT License\", \"Intended Audience ::",
"classifiers=[ \"License :: OSI Approved :: MIT License\", \"Intended Audience :: Developers\", \"Programming",
":: OSI Approved :: MIT License\", \"Intended Audience :: Developers\", \"Programming Language ::",
"if __name__ == \"__main__\": this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:",
"from os import path if __name__ == \"__main__\": this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory,",
"Will be updated for release by CI pipeline classifiers=[ \"License :: OSI Approved",
"<reponame>duncaneddy/rastro #!/usr/bin/env python from setuptools import setup from setuptools_rust import RustExtension from os",
"with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name=\"rastro\", version=\"0.0.0\", #",
"f: long_description = f.read() setup( name=\"rastro\", version=\"0.0.0\", # Do NOT edit. Will be",
"be updated for release by CI pipeline classifiers=[ \"License :: OSI Approved ::",
"\"License :: OSI Approved :: MIT License\", \"Intended Audience :: Developers\", \"Programming Language",
"#!/usr/bin/env python from setuptools import setup from setuptools_rust import RustExtension from os import",
"release by CI pipeline classifiers=[ \"License :: OSI Approved :: MIT License\", \"Intended",
"pipeline classifiers=[ \"License :: OSI Approved :: MIT License\", \"Intended Audience :: Developers\",",
"== \"__main__\": this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description =",
"setuptools import setup from setuptools_rust import RustExtension from os import path if __name__",
"open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name=\"rastro\", version=\"0.0.0\", # Do",
"long_description = f.read() setup( name=\"rastro\", version=\"0.0.0\", # Do NOT edit. Will be updated",
"version=\"0.0.0\", # Do NOT edit. Will be updated for release by CI pipeline",
"os import path if __name__ == \"__main__\": this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'),",
"edit. Will be updated for release by CI pipeline classifiers=[ \"License :: OSI",
"Approved :: MIT License\", \"Intended Audience :: Developers\", \"Programming Language :: Python\", \"Programming",
"import setup from setuptools_rust import RustExtension from os import path if __name__ ==",
"License\", \"Intended Audience :: Developers\", \"Programming Language :: Python\", \"Programming Language :: Rust\",",
":: Developers\", \"Programming Language :: Python\", \"Programming Language :: Rust\", ], packages=[\"rastro\"], rust_extensions=[",
"Language :: Rust\", ], packages=[\"rastro\"], rust_extensions=[ RustExtension(\"rastro_python.constants\"), RustExtension(\"rastro_python.orbits\") ], include_package_data=True, zip_safe=False, long_description=long_description, long_description_content_type='text/markdown',",
"Developers\", \"Programming Language :: Python\", \"Programming Language :: Rust\", ], packages=[\"rastro\"], rust_extensions=[ RustExtension(\"rastro_python.constants\"),",
"MIT License\", \"Intended Audience :: Developers\", \"Programming Language :: Python\", \"Programming Language ::",
"\"__main__\": this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description = f.read()",
"updated for release by CI pipeline classifiers=[ \"License :: OSI Approved :: MIT",
":: Rust\", ], packages=[\"rastro\"], rust_extensions=[ RustExtension(\"rastro_python.constants\"), RustExtension(\"rastro_python.orbits\") ], include_package_data=True, zip_safe=False, long_description=long_description, long_description_content_type='text/markdown', )",
"as f: long_description = f.read() setup( name=\"rastro\", version=\"0.0.0\", # Do NOT edit. Will",
"from setuptools import setup from setuptools_rust import RustExtension from os import path if",
"\"Programming Language :: Rust\", ], packages=[\"rastro\"], rust_extensions=[ RustExtension(\"rastro_python.constants\"), RustExtension(\"rastro_python.orbits\") ], include_package_data=True, zip_safe=False, long_description=long_description,"
] |
[
"in range(len(dealerlist)): if dealerlist[x]==(11): dealerlist[x]=(1) if sum(dealerlist)>(21): win() #This determines what move the",
"finishes their turn, this code checks who wins, loses, or ties def standcheck():",
"if again==('no'): crash() else: again=input('yes or no') if again==('yes'): blackjack() if again==('no'): crash()",
"global cardlistStr global dealerlistStr global losex losex=losex+1 print('you have won '+str(winx) + \"times",
"of the game print('Your cards are '+str(carddeck[c1])+' and '+str(carddeck[c2])) print(\"The dealer's open card",
"If you are wondering which focus day this is connected to, it isn't",
"have won '+str(winx) + \"times and lost \"+str(losex)+\" times\") print() print('you lost :(')",
"I forgot it had to be related to a focus day, but it",
"are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('play again? ') if again2==('yes'): blackjack()",
"def lose(): global cardlist global dealerlist global cardlistStr global dealerlistStr global losex losex=losex+1",
"!= ('no'): again2=input('yes or no') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() #globals",
"are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', and '+(carddeck[c6])) if sum(cardlist)>21: for x in range(len(cardlist)):",
"'+ str(carddeck[c5])+', and '+(carddeck[c6])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1)",
"lose, win or tie def lose(): global cardlist global dealerlist global cardlistStr global",
"lose(): global cardlist global dealerlist global cardlistStr global dealerlistStr global losex losex=losex+1 print('you",
"lists and some random ints cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=(random.randint(0,51)) c2=(random.randint(0,51)) c3=(random.randint(0,51)) c4=(random.randint(0,51))",
"\") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('you tied, play again? ') if again2==('yes'):",
"print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9])) if",
"are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+ str(carddeck[c5])) if sum(cardlist)>(21): for x in range(len(cardlist)): if",
"while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or stand ') if (cardlist)==(21): win()",
"cardlist global dealerlist global cardlistStr global dealerlistStr #defines lists and some random ints",
"names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns values to the card names cardvalue={ 'A': 11, '2': 2,",
"'+(carddeck[c6])+', and '+(carddeck[c7])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if",
"print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('you tied, play",
"turn to the dealer, and hit to draw a new card\") #defines lists",
"if choice1==('hit'): c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+',",
"cardlistStr.append(carddeck[c8]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8])) if",
"dealerlistStr #defines lists and some random ints cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=(random.randint(0,51)) c2=(random.randint(0,51))",
"\"+str(carddeck[c3])) #after the dealer finishes their turn, this code checks who wins, loses,",
"#globals the lists global cardlist global dealerlist global cardlistStr global dealerlistStr #defines lists",
"sum(cardlist)==21: print('BLACKJACK') win() else: choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick",
"10 } #this function crashes python def crash(): try: crash() except: crash() #blakcjack",
"print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('play again? ') if again2==('yes'): blackjack() if again2==('no'):",
"or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or stand ')",
"crash() def tie(): global cardlist global dealerlist global cardlistStr global dealerlistStr print(\"The dealer's",
"it, and I forgot it had to be related to a focus day,",
"choice1==('hit'): c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+ str(carddeck[c5])) if",
"what move the dealer does when it is their turn def stand(): if",
"dealerlist=[] cardlistStr=[] dealerlistStr=[] again=input('try again? ') if again==('yes'): blackjack() if again==('no'): crash() else:",
"won :)') print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards are \")",
"else: choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or",
"dealerlistStr.append(carddeck[c4]) #asks w1=input('Hit or stand? ') choice1=input('Hit or stand? ') while choice1!=('hit') and",
"cardlistStr=[] dealerlistStr=[] c1=(random.randint(0,51)) c2=(random.randint(0,51)) c3=(random.randint(0,51)) c4=(random.randint(0,51)) #this prints what your cards are at",
"to beat the dealer. If you get over 21, you lose. stand to",
"code checks who wins, loses, or ties def standcheck(): if sum(dealerlist)<=(21): if sum(dealerlist)>sum(cardlist):",
"lose, tie, win functions that happen when you lose, win or tie def",
"cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+ str(carddeck[c5])) if sum(cardlist)>(21): for x in range(len(cardlist)):",
"cardlistStr.append(carddeck[c10]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck(c10)))",
"print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('play again? ')",
"range(len(dealerlist)): if dealerlist[x]==(11): dealerlist[x]=(1) if sum(dealerlist)>(21): win() #This determines what move the dealer",
"play: your goal is to get your card total closest to 21, and",
"else: again=input('yes or no') if again==('yes'): blackjack() if again==('no'): crash() def win(): global",
"it had to be related to a focus day, but it was too",
"dealerlist[x]=(1) if sum(dealerlist)>(21): win() #This determines what move the dealer does when it",
"times and lost \"+str(losex)+\" times\") print() print('you won :)') print(\"The dealer's cards are",
"tie, win functions that happen when you lose, win or tie def lose():",
"\") print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('play",
"and some random ints cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=(random.randint(0,51)) c2=(random.randint(0,51)) c3=(random.randint(0,51)) c4=(random.randint(0,51)) #this",
"'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9':",
"'7': 7, '8': 8, '9': 9, '10': 10, 'J': 10, 'Q': 10, 'K':",
"to give the turn to the dealer, and hit to draw a new",
"blackjack() if again==('no'): crash() def win(): global cardlist global dealerlist global cardlistStr global",
"or stand ') if choice1==('stand'): stand() if choice1==('hit'): c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11]) print('Your cards",
"crash() #globals the lists global cardlist global dealerlist global cardlistStr global dealerlistStr #defines",
"to play: your goal is to get your card total closest to 21,",
"range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() if sum(cardlist)<(21):",
"c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+',",
"get your card total closest to 21, and to beat the dealer. If",
"if (cardlist)==(21): win() if choice1==('hit'): c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+',",
"lose. stand to give the turn to the dealer, and hit to draw",
"in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() else:",
"print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8])) if sum(cardlist)>21:",
"dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('play again? ') if again2==('yes'): blackjack() if again2==('no'): crash() if",
"while sum(dealerlist)<=(16): dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2]) standcheck() if sum(dealerlist)>(17): standcheck() #Adds all the beginning",
"da=0 db=0 winx=0 losex=0 #defines the list for where I will take card",
"function crashes python def crash(): try: crash() except: crash() #blakcjack funtion def blackjack():",
"choice1==('hit'): c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+',",
"10, 'J': 10, 'Q': 10, 'K': 10 } #this function crashes python def",
"choice1==('hit'): c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+',",
"stand? ') choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit",
"c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+',",
"either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7])",
"isn't connected to any of them. \") print() print(\"I started making it, and",
"\") print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('you",
"the start of the game print('Your cards are '+str(carddeck[c1])+' and '+str(carddeck[c2])) print(\"The dealer's",
"#define lose, tie, win functions that happen when you lose, win or tie",
"move the dealer does when it is their turn def stand(): if sum(dealerlist)>(17):",
"crashes python def crash(): try: crash() except: crash() #blakcjack funtion def blackjack(): #define",
"and '+(carddeck[c6])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21):",
"if sum(dealerlist)>(21): for x in range(len(dealerlist)): if dealerlist[x]==(11): dealerlist[x]=(1) if sum(dealerlist)>(21): win() #This",
"either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9])",
"'+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1)",
"dealerlistStr=[] again=input('try again? ') if again==('yes'): blackjack() if again==('no'): crash() else: again=input('yes or",
"dealerlistStr.append(carddeck[dc1]) while sum(dealerlist)<=(16): dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2]) standcheck() if sum(dealerlist)>(17): standcheck() #Adds all the",
"day, but it was too late to switch, so here it is \")",
"again2==('no'): print('ok') crash() #globals the lists global cardlist global dealerlist global cardlistStr global",
"the dealer. If you get over 21, you lose. stand to give the",
"again==('no'): crash() def win(): global cardlist global dealerlist global cardlistStr global dealerlistStr global",
"print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', and '+(carddeck[c6])) if sum(cardlist)>21: for x",
"str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9])) if sum(cardlist)>21: for x in range(len(cardlist)): if",
"!= ('yes') or again2 != ('no'): again2=input('yes or no') if again2==('yes'): blackjack() if",
"either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10])",
"cardlistStr=[] dealerlistStr=[] again2=input('play again? ') if again2==('yes'): blackjack() if again2==('no'): crash() if again2",
"cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', and '+(carddeck[c6])) if sum(cardlist)>21:",
"sum(dealerlist)<=(21): if sum(dealerlist)>sum(cardlist): lose() if sum(cardlist)>sum(dealerlist): win() if sum(dealerlist)==(21): if sum(dealerlist)==sum(cardlist): tie() else:",
"print() print('you won :)') print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards",
"cardlistStr global dealerlistStr global losex losex=losex+1 print('you have won '+str(winx) + \"times and",
"cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('you tied, play again? ')",
"'+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8])) if sum(cardlist)>21: for x in range(len(cardlist)):",
"print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again=input('try again?",
"#Assigns values to the card names cardvalue={ 'A': 11, '2': 2, '3': 3,",
"is blackjack. If you are wondering which focus day this is connected to,",
"print() print('you lost :(') print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards",
"'+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', and '+(carddeck[c6])) if sum(cardlist)>21: for x in range(len(cardlist)): if",
"if again2 != ('yes') or again2 != ('no'): again2=input('yes or no') if again2==('yes'):",
"to draw a new card\") #defines lists and values cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[]",
"again=input('yes or no') if again==('yes'): blackjack() if again==('no'): crash() def win(): global cardlist",
"print('ok') crash() if again2 != ('yes') or again2 != ('no'): again2=input('yes or no')",
"if choice1==('stand'): stand() if choice1==('hit'): c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+',",
"stand ') if choice1==('stand'): stand() if choice1==('hit'): c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9]) print('Your cards are",
"the dealer finishes their turn, this code checks who wins, loses, or ties",
"c3=0 c4=0 a=0 b=0 da=0 db=0 winx=0 losex=0 #defines the list for where",
"cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4]) #asks w1=input('Hit or stand? ')",
"choice1==('hit'): c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+',",
"it was too late to switch, so here it is \") print() print(\"how",
"or no') if again2==('yes'): blackjack() if again2==('no'): crash() def tie(): global cardlist global",
"choice1!=('stand'): choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c9=(random.randint(0,51))",
"'+str(carddeck[c1])+' and '+str(carddeck[c2])) print(\"The dealer's open card is \"+str(carddeck[c3])) #after the dealer finishes",
"dealer does when it is their turn def stand(): if sum(dealerlist)>(17): standcheck() if",
"sum(dealerlist)==sum(cardlist): standcheck() if sum(dealerlist)>sum(cardlist): lose() else: dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1]) while sum(dealerlist)<=(16): dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]])",
"your cards are at the start of the game print('Your cards are '+str(carddeck[c1])+'",
"it isn't connected to any of them. \") print() print(\"I started making it,",
"cardlistStr=[] dealerlistStr=[] c1=0 c2=0 c3=0 c4=0 a=0 b=0 da=0 db=0 winx=0 losex=0 #defines",
"and '+(carddeck[c7])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21):",
"') choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or",
"lose() if sum(cardlist)==21: print('BLACKJACK') win() else: choice1=input('Hit or stand? ') while choice1!=('hit') and",
"closest to 21, and to beat the dealer. If you get over 21,",
"('yes') or again2 != ('no'): again2=input('yes or no') if again2==('yes'): blackjack() if again2==('no'):",
"if choice1==('hit'): c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+ str(carddeck[c5]))",
"are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+\" and \"+(carddeck[c11])) if",
"') if choice1==('stand'): stand() if choice1==('hit'): c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8]) print('Your cards are '+str(carddeck[c1])+',",
"and hit to draw a new card\") #defines lists and values cardlist=[] dealerlist=[]",
"'+(carddeck[c9])+', '+(carddeck(c10))) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21):",
"crash() except: crash() #blakcjack funtion def blackjack(): #define lose, tie, win functions that",
"print('Your cards are '+str(carddeck[c1])+' and '+str(carddeck[c2])) print(\"The dealer's open card is \"+str(carddeck[c3])) #after",
"cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=0 c2=0 c3=0 c4=0 a=0 b=0 da=0 db=0 winx=0",
"'5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, 'J':",
"def tie(): global cardlist global dealerlist global cardlistStr global dealerlistStr print(\"The dealer's cards",
"global cardlistStr global dealerlistStr print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards",
"cardlistStr global dealerlistStr print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards are",
"are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again=input('try again? ') if again==('yes'): blackjack()",
"dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2]) standcheck() if sum(dealerlist)>(17): standcheck() #Adds all the beginning variables to their",
"cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again=input('try again? ') if again==('yes'):",
"#after the dealer finishes their turn, this code checks who wins, loses, or",
"print('BLACKJACK') win() else: choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either",
"connected to, it isn't connected to any of them. \") print() print(\"I started",
"again2 != ('no'): again2=input('yes or no') if again2==('yes'): blackjack() if again2==('no'): crash() def",
"crash() if again2 != ('yes') or again2 != ('no'): again2=input('yes or no') if",
"'+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck(c10))) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1)",
"\" times and lost \"+str(losex)+\" times\") print() print('you won :)') print(\"The dealer's cards",
"forgot it had to be related to a focus day, but it was",
"c1=(random.randint(0,51)) c2=(random.randint(0,51)) c3=(random.randint(0,51)) c4=(random.randint(0,51)) #this prints what your cards are at the start",
"turn, this code checks who wins, loses, or ties def standcheck(): if sum(dealerlist)<=(21):",
"if choice1==('stand'): stand() if choice1==('hit'): c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+',",
"that happen when you lose, win or tie def lose(): global cardlist global",
"') if choice1==('stand'): stand() if choice1==('hit'): c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9]) print('Your cards are '+str(carddeck[c1])+',",
"cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9])) if sum(cardlist)>21:",
"'+(carddeck[c7])+', and '+(carddeck[c8])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if",
"and lost \"+str(losex)+\" times\") print() print('you lost :(') print(\"The dealer's cards are \")",
"sum(dealerlist)>(21): for x in range(len(dealerlist)): if dealerlist[x]==(11): dealerlist[x]=(1) if sum(dealerlist)>(21): win() #This determines",
"resepctive lists cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4]) #asks w1=input('Hit or",
"cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=(random.randint(0,51)) c2=(random.randint(0,51)) c3=(random.randint(0,51)) c4=(random.randint(0,51)) #this prints what your cards",
"sum(dealerlist)==(21): if sum(dealerlist)==sum(cardlist): tie() else: lose() if sum(dealerlist)>(21): for x in range(len(dealerlist)): if",
"cardlist global dealerlist global cardlistStr global dealerlistStr global losex losex=losex+1 print('you have won",
"choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]])",
"if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() if again2 != ('yes') or again2",
"the dealer does when it is their turn def stand(): if sum(dealerlist)>(17): standcheck()",
"either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6])",
"cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', and '+(carddeck[c6])) if sum(cardlist)>21: for x in",
"new card\") #defines lists and values cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=0 c2=0 c3=0",
"losex=losex+1 print('you have won '+str(winx) + \"times and lost \"+str(losex)+\" times\") print() print('you",
"'+str(carddeck[c2])) print(\"The dealer's open card is \"+str(carddeck[c3])) #after the dealer finishes their turn,",
"def stand(): if sum(dealerlist)>(17): standcheck() if sum(dealerlist)==sum(cardlist): standcheck() if sum(dealerlist)>sum(cardlist): lose() else: dc1=(random.randint(0,51))",
"\") print() print(\"how to play: your goal is to get your card total",
"to be related to a focus day, but it was too late to",
"and values cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=0 c2=0 c3=0 c4=0 a=0 b=0 da=0",
"cardlist global dealerlist global cardlistStr global dealerlistStr print(\"The dealer's cards are \") print(dealerlistStr)",
"if sum(dealerlist)>sum(cardlist): lose() else: dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1]) while sum(dealerlist)<=(16): dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2]) standcheck()",
"win or tie def lose(): global cardlist global dealerlist global cardlistStr global dealerlistStr",
"sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() else: choice1=input('Hit or stand? ') while choice1!=('hit')",
"dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=(random.randint(0,51)) c2=(random.randint(0,51)) c3=(random.randint(0,51)) c4=(random.randint(0,51)) #this prints what your cards are",
"if choice1==('hit'): c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+',",
"cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again=input('try again? ') if again==('yes'): blackjack() if again==('no'): crash()",
"your goal is to get your card total closest to 21, and to",
"to switch, so here it is \") print() print(\"how to play: your goal",
"no') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() #globals the lists global cardlist",
"cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7])) if",
"sum(cardlist)>sum(dealerlist): win() if sum(dealerlist)==(21): if sum(dealerlist)==sum(cardlist): tie() else: lose() if sum(dealerlist)>(21): for x",
"if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if",
"and '+ str(carddeck[c5])) if sum(cardlist)>(21): for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if",
"dealerlistStr.append(carddeck[dc2]) standcheck() if sum(dealerlist)>(17): standcheck() #Adds all the beginning variables to their resepctive",
"choice1==('hit'): c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', and '+(carddeck[c6]))",
"and to beat the dealer. If you get over 21, you lose. stand",
"print('you have won '+str(winx) + \"times and lost \"+str(losex)+\" times\") print() print('you lost",
"it is \") print() print(\"how to play: your goal is to get your",
"cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7])) if sum(cardlist)>21: for x",
"else: dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1]) while sum(dealerlist)<=(16): dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2]) standcheck() if sum(dealerlist)>(17): standcheck()",
"stand ') if choice1==('stand'): stand() if choice1==('hit'): c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8]) print('Your cards are",
"7, '8': 8, '9': 9, '10': 10, 'J': 10, 'Q': 10, 'K': 10",
"if sum(cardlist)>(21): for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if",
"global cardlist global dealerlist global cardlistStr global dealerlistStr global winx winx=winx+1 print('you have",
"their turn, this code checks who wins, loses, or ties def standcheck(): if",
"win() if sum(dealerlist)==(21): if sum(dealerlist)==sum(cardlist): tie() else: lose() if sum(dealerlist)>(21): for x in",
"\") print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again=input('try",
"choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]])",
"'+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8])) if sum(cardlist)>21: for x in",
"global winx winx=winx+1 print('you have won '+str(winx) + \" times and lost \"+str(losex)+\"",
"dealerlistStr print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr)",
"again2==('no'): crash() if again2 != ('yes') or again2 != ('no'): again2=input('yes or no')",
"again2==('no'): crash() def tie(): global cardlist global dealerlist global cardlistStr global dealerlistStr print(\"The",
"cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and",
"are \") print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[]",
"global dealerlistStr global winx winx=winx+1 print('you have won '+str(winx) + \" times and",
"times\") print() print('you won :)') print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your",
"str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck(c10))) if sum(cardlist)>21: for x in range(len(cardlist)): if",
"choice1==('stand'): stand() if choice1==('hit'): c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+",
"choice1!=('stand'): choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c8=(random.randint(0,51))",
"if sum(cardlist)==21: print('BLACKJACK') win() if sum(cardlist)<(21): choice1=input('Hit or stand? ') while choice1!=('hit') and",
"choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]])",
"dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4]) #asks w1=input('Hit or stand? ') choice1=input('Hit or stand?",
"print(\"this game is blackjack. If you are wondering which focus day this is",
"play again? ') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() if again2 !=",
"c4=0 a=0 b=0 da=0 db=0 winx=0 losex=0 #defines the list for where I",
"if choice1==('hit'): c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', and",
"cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+\" and \"+(carddeck[c11]))",
"over 21, you lose. stand to give the turn to the dealer, and",
"c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+',",
"a new card\") #defines lists and values cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=0 c2=0",
"5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, 'J': 10,",
"functions that happen when you lose, win or tie def lose(): global cardlist",
"\"+str(losex)+\" times\") print() print('you won :)') print(\"The dealer's cards are \") print(dealerlistStr) print()",
"sum(dealerlist)>(21): win() #This determines what move the dealer does when it is their",
"was too late to switch, so here it is \") print() print(\"how to",
"choice1!=('stand'): choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c6=random.randint(0,51)",
"their resepctive lists cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4]) #asks w1=input('Hit",
"and '+(carddeck[c9])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21):",
"win() else: choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit",
"'+(carddeck[c10])+\" and \"+(carddeck[c11])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if",
"global cardlist global dealerlist global cardlistStr global dealerlistStr global losex losex=losex+1 print('you have",
"for where I will take card names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns values to the card",
"#blakcjack funtion def blackjack(): #define lose, tie, win functions that happen when you",
"you are wondering which focus day this is connected to, it isn't connected",
"the card names cardvalue={ 'A': 11, '2': 2, '3': 3, '4': 4, '5':",
"print() print(\"I started making it, and I forgot it had to be related",
"'+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8])) if sum(cardlist)>21: for x in range(len(cardlist)): if",
"but it was too late to switch, so here it is \") print()",
"for x in range(len(dealerlist)): if dealerlist[x]==(11): dealerlist[x]=(1) if sum(dealerlist)>(21): win() #This determines what",
"your card total closest to 21, and to beat the dealer. If you",
"21, you lose. stand to give the turn to the dealer, and hit",
"cardlistStr.append(carddeck[c11]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+\"",
"choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]])",
"dealerlist global cardlistStr global dealerlistStr global winx winx=winx+1 print('you have won '+str(winx) +",
"#defines the list for where I will take card names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns values",
"choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]])",
"dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4]) #asks w1=input('Hit or stand? ') choice1=input('Hit or",
"'+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9])) if sum(cardlist)>21: for x in range(len(cardlist)):",
"stand to give the turn to the dealer, and hit to draw a",
"are wondering which focus day this is connected to, it isn't connected to",
"if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() if choice1==('stand'): stand()",
"if choice1==('hit'): c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+',",
"10, 'K': 10 } #this function crashes python def crash(): try: crash() except:",
"2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,",
"dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4]) #asks w1=input('Hit or stand? ') choice1=input('Hit or stand? ') while choice1!=('hit')",
"'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8':",
"win() if choice1==('hit'): c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+",
"to any of them. \") print() print(\"I started making it, and I forgot",
"c2=(random.randint(0,51)) c3=(random.randint(0,51)) c4=(random.randint(0,51)) #this prints what your cards are at the start of",
"'+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck(c10))) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if",
"again=input('try again? ') if again==('yes'): blackjack() if again==('no'): crash() else: again=input('yes or no')",
"global dealerlist global cardlistStr global dealerlistStr #defines lists and some random ints cardlist=[]",
"w1=input('Hit or stand? ') choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick",
"start of the game print('Your cards are '+str(carddeck[c1])+' and '+str(carddeck[c2])) print(\"The dealer's open",
"focus day this is connected to, it isn't connected to any of them.",
"stand ') if choice1==('stand'): stand() if choice1==('hit'): c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11]) print('Your cards are",
"dealer's cards are \") print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[]",
"and '+(carddeck[c8])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21):",
"is their turn def stand(): if sum(dealerlist)>(17): standcheck() if sum(dealerlist)==sum(cardlist): standcheck() if sum(dealerlist)>sum(cardlist):",
"the turn to the dealer, and hit to draw a new card\") #defines",
"global cardlist global dealerlist global cardlistStr global dealerlistStr #defines lists and some random",
"print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again=input('try again? ') if",
"cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+',",
"in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() if",
"game is blackjack. If you are wondering which focus day this is connected",
"or no') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() #globals the lists global",
"choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]])",
"all the beginning variables to their resepctive lists cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1])",
"print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr) cardlist=[]",
"#this function crashes python def crash(): try: crash() except: crash() #blakcjack funtion def",
"dealer. If you get over 21, you lose. stand to give the turn",
"blackjack() if again2==('no'): crash() if again2 != ('yes') or again2 != ('no'): again2=input('yes",
"8, '9': 9, '10': 10, 'J': 10, 'Q': 10, 'K': 10 } #this",
"lose() if sum(cardlist)==21: print('BLACKJACK') win() if sum(cardlist)<(21): choice1=input('Hit or stand? ') while choice1!=('hit')",
"global dealerlist global cardlistStr global dealerlistStr global losex losex=losex+1 print('you have won '+str(winx)",
"stand() if choice1==('hit'): c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+',",
"or stand ') if choice1==('stand'): stand() if choice1==('hit'): c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7]) print('Your cards",
"if choice1==('stand'): stand() if choice1==('hit'): c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+',",
"cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() if choice1==('stand'): stand() #a blackjack()",
"print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7])) if sum(cardlist)>21: for",
"choice1==('stand'): stand() if choice1==('hit'): c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+",
"or stand ') if choice1==('stand'): stand() if choice1==('hit'): c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10]) print('Your cards",
"sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() if sum(cardlist)<(21): choice1=input('Hit or stand? ') while",
"loses, or ties def standcheck(): if sum(dealerlist)<=(21): if sum(dealerlist)>sum(cardlist): lose() if sum(cardlist)>sum(dealerlist): win()",
"stand ') if choice1==('stand'): stand() if choice1==('hit'): c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7]) print('Your cards are",
"print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again=input('try again? ') if again==('yes'): blackjack() if again==('no'):",
"to a focus day, but it was too late to switch, so here",
"cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4]) #asks w1=input('Hit or stand? ') choice1=input('Hit",
"cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+',",
"if sum(cardlist)>sum(dealerlist): win() if sum(dealerlist)==(21): if sum(dealerlist)==sum(cardlist): tie() else: lose() if sum(dealerlist)>(21): for",
"stand ') if choice1==('stand'): stand() if choice1==('hit'): c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6]) print('Your cards are",
"If you get over 21, you lose. stand to give the turn to",
"10, 'Q': 10, 'K': 10 } #this function crashes python def crash(): try:",
"when you lose, win or tie def lose(): global cardlist global dealerlist global",
"again2==('yes'): blackjack() if again2==('no'): crash() def tie(): global cardlist global dealerlist global cardlistStr",
"you get over 21, you lose. stand to give the turn to the",
"the game print('Your cards are '+str(carddeck[c1])+' and '+str(carddeck[c2])) print(\"The dealer's open card is",
"so here it is \") print() print(\"how to play: your goal is to",
"crash() else: again=input('yes or no') if again==('yes'): blackjack() if again==('no'): crash() def win():",
"dealer's open card is \"+str(carddeck[c3])) #after the dealer finishes their turn, this code",
"'+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+\" and \"+(carddeck[c11])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11):",
"standcheck(): if sum(dealerlist)<=(21): if sum(dealerlist)>sum(cardlist): lose() if sum(cardlist)>sum(dealerlist): win() if sum(dealerlist)==(21): if sum(dealerlist)==sum(cardlist):",
"print('you have won '+str(winx) + \" times and lost \"+str(losex)+\" times\") print() print('you",
"'+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7])) if sum(cardlist)>21: for x in range(len(cardlist)):",
"11, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,",
"sum(cardlist)>(21): for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21:",
"def standcheck(): if sum(dealerlist)<=(21): if sum(dealerlist)>sum(cardlist): lose() if sum(cardlist)>sum(dealerlist): win() if sum(dealerlist)==(21): if",
"wondering which focus day this is connected to, it isn't connected to any",
"print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again=input('try again? ')",
"choice1==('stand'): stand() if choice1==('hit'): c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+",
"connected to any of them. \") print() print(\"I started making it, and I",
"def win(): global cardlist global dealerlist global cardlistStr global dealerlistStr global winx winx=winx+1",
"day this is connected to, it isn't connected to any of them. \")",
"time print(\"this game is blackjack. If you are wondering which focus day this",
"if sum(dealerlist)>(17): standcheck() if sum(dealerlist)==sum(cardlist): standcheck() if sum(dealerlist)>sum(cardlist): lose() else: dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1])",
"standcheck() #Adds all the beginning variables to their resepctive lists cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]])",
"range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() if choice1==('stand'):",
"try: crash() except: crash() #blakcjack funtion def blackjack(): #define lose, tie, win functions",
"again2==('yes'): blackjack() if again2==('no'): print('ok') crash() if again2 != ('yes') or again2 !=",
"choice1!=('stand'): choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c11=(random.randint(0,51))",
"choice1=input('Pick either hit or stand ') if (cardlist)==(21): win() if choice1==('hit'): c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]])",
"card\") #defines lists and values cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=0 c2=0 c3=0 c4=0",
"no') if again==('yes'): blackjack() if again==('no'): crash() def win(): global cardlist global dealerlist",
"cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('play again? ') if again2==('yes'):",
"if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() else: choice1=input('Hit or",
"print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('play again? ') if",
"stand ') if (cardlist)==(21): win() if choice1==('hit'): c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5]) print('Your cards are",
"when it is their turn def stand(): if sum(dealerlist)>(17): standcheck() if sum(dealerlist)==sum(cardlist): standcheck()",
"lose() if sum(dealerlist)>(21): for x in range(len(dealerlist)): if dealerlist[x]==(11): dealerlist[x]=(1) if sum(dealerlist)>(21): win()",
"either hit or stand ') if (cardlist)==(21): win() if choice1==('hit'): c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5])",
"again2 != ('no'): again2=input('yes or no') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash()",
"def blackjack(): #define lose, tie, win functions that happen when you lose, win",
"cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('you tied, play again? ') if again2==('yes'): blackjack() if",
"'+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+ str(carddeck[c5])) if sum(cardlist)>(21): for x in range(len(cardlist)): if cardlist[x]==(11):",
"') if choice1==('stand'): stand() if choice1==('hit'): c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11]) print('Your cards are '+str(carddeck[c1])+',",
"print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+ str(carddeck[c5])) if sum(cardlist)>(21): for x in",
"tied, play again? ') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() if again2",
"values cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=0 c2=0 c3=0 c4=0 a=0 b=0 da=0 db=0",
"'4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10':",
"if sum(dealerlist)==sum(cardlist): tie() else: lose() if sum(dealerlist)>(21): for x in range(len(dealerlist)): if dealerlist[x]==(11):",
"if choice1==('stand'): stand() if choice1==('hit'): c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+',",
"str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+\" and \"+(carddeck[c11])) if sum(cardlist)>21: for x in",
"'K': 10 } #this function crashes python def crash(): try: crash() except: crash()",
"values to the card names cardvalue={ 'A': 11, '2': 2, '3': 3, '4':",
"again==('yes'): blackjack() if again==('no'): crash() else: again=input('yes or no') if again==('yes'): blackjack() if",
"if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() else: choice1=input('Hit or stand? ') while",
"is \") print() print(\"how to play: your goal is to get your card",
"of them. \") print() print(\"I started making it, and I forgot it had",
"or stand? ') choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either",
"except: crash() #blakcjack funtion def blackjack(): #define lose, tie, win functions that happen",
"variables to their resepctive lists cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4])",
"ties def standcheck(): if sum(dealerlist)<=(21): if sum(dealerlist)>sum(cardlist): lose() if sum(cardlist)>sum(dealerlist): win() if sum(dealerlist)==(21):",
"dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1]) while sum(dealerlist)<=(16): dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2]) standcheck() if sum(dealerlist)>(17): standcheck() #Adds all",
"you lose. stand to give the turn to the dealer, and hit to",
"') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or stand ') if (cardlist)==(21):",
"a=0 b=0 da=0 db=0 winx=0 losex=0 #defines the list for where I will",
"and I forgot it had to be related to a focus day, but",
"lists cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4]) #asks w1=input('Hit or stand?",
"stand(): if sum(dealerlist)>(17): standcheck() if sum(dealerlist)==sum(cardlist): standcheck() if sum(dealerlist)>sum(cardlist): lose() else: dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]])",
"choice1!=('stand'): choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c10=(random.randint(0,51))",
"choice1!=('stand'): choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c7=(random.randint(0,51))",
"'+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9])) if sum(cardlist)>21: for x",
"sum(cardlist)<(21): choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or",
"cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4]) #asks w1=input('Hit or stand? ') choice1=input('Hit or stand? ') while",
"global cardlistStr global dealerlistStr global winx winx=winx+1 print('you have won '+str(winx) + \"",
"sum(dealerlist)==sum(cardlist): tie() else: lose() if sum(dealerlist)>(21): for x in range(len(dealerlist)): if dealerlist[x]==(11): dealerlist[x]=(1)",
"def crash(): try: crash() except: crash() #blakcjack funtion def blackjack(): #define lose, tie,",
"tie def lose(): global cardlist global dealerlist global cardlistStr global dealerlistStr global losex",
"if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() if sum(cardlist)<(21): choice1=input('Hit or stand? ')",
"} #this function crashes python def crash(): try: crash() except: crash() #blakcjack funtion",
"are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('you tied, play again? ') if",
"') if (cardlist)==(21): win() if choice1==('hit'): c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5]) print('Your cards are '+str(carddeck[c1])+',",
"stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or stand ') if",
"'Q': 10, 'K': 10 } #this function crashes python def crash(): try: crash()",
"print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('you tied,",
"print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('play again?",
"global cardlist global dealerlist global cardlistStr global dealerlistStr print(\"The dealer's cards are \")",
"who wins, loses, or ties def standcheck(): if sum(dealerlist)<=(21): if sum(dealerlist)>sum(cardlist): lose() if",
"str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11):",
"at the start of the game print('Your cards are '+str(carddeck[c1])+' and '+str(carddeck[c2])) print(\"The",
"'+(carddeck[c7])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose()",
"print(\"I started making it, and I forgot it had to be related to",
"again2 != ('yes') or again2 != ('no'): again2=input('yes or no') if again2==('yes'): blackjack()",
"this code checks who wins, loses, or ties def standcheck(): if sum(dealerlist)<=(21): if",
"\"+(carddeck[c11])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose()",
"cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8]))",
"have won '+str(winx) + \" times and lost \"+str(losex)+\" times\") print() print('you won",
"+ \"times and lost \"+str(losex)+\" times\") print() print('you lost :(') print(\"The dealer's cards",
"or stand ') if choice1==('stand'): stand() if choice1==('hit'): c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9]) print('Your cards",
"goal is to get your card total closest to 21, and to beat",
"global dealerlist global cardlistStr global dealerlistStr print(\"The dealer's cards are \") print(dealerlistStr) print()",
"winx=winx+1 print('you have won '+str(winx) + \" times and lost \"+str(losex)+\" times\") print()",
"the beginning variables to their resepctive lists cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2])",
"x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win()",
"if sum(dealerlist)>sum(cardlist): lose() if sum(cardlist)>sum(dealerlist): win() if sum(dealerlist)==(21): if sum(dealerlist)==sum(cardlist): tie() else: lose()",
"cards are at the start of the game print('Your cards are '+str(carddeck[c1])+' and",
"stand ') if choice1==('stand'): stand() if choice1==('hit'): c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10]) print('Your cards are",
"6, '7': 7, '8': 8, '9': 9, '10': 10, 'J': 10, 'Q': 10,",
"cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('play again? ') if again2==('yes'): blackjack() if again2==('no'): crash()",
"win() if sum(cardlist)<(21): choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either",
"global losex losex=losex+1 print('you have won '+str(winx) + \"times and lost \"+str(losex)+\" times\")",
"hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9]) print('Your",
"'+(carddeck[c6])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose()",
"will take card names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns values to the card names cardvalue={ 'A':",
"again2=input('play again? ') if again2==('yes'): blackjack() if again2==('no'): crash() if again2 != ('yes')",
"sum(dealerlist)<=(16): dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2]) standcheck() if sum(dealerlist)>(17): standcheck() #Adds all the beginning variables",
"where I will take card names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns values to the card names",
"print('ok') crash() #globals the lists global cardlist global dealerlist global cardlistStr global dealerlistStr",
"win functions that happen when you lose, win or tie def lose(): global",
"again2==('no'): print('ok') crash() if again2 != ('yes') or again2 != ('no'): again2=input('yes or",
"what your cards are at the start of the game print('Your cards are",
"standcheck() if sum(dealerlist)>(17): standcheck() #Adds all the beginning variables to their resepctive lists",
"'+(carddeck(c10))) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose()",
"are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9])) if sum(cardlist)>21: for",
"python def crash(): try: crash() except: crash() #blakcjack funtion def blackjack(): #define lose,",
"if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() #globals the lists global cardlist global",
"card total closest to 21, and to beat the dealer. If you get",
"making it, and I forgot it had to be related to a focus",
"hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7]) print('Your",
"'+str(carddeck[c2])+', '+ str(carddeck[c5])+', and '+(carddeck[c6])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11):",
"\"times and lost \"+str(losex)+\" times\") print() print('you lost :(') print(\"The dealer's cards are",
"'+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9])) if sum(cardlist)>21: for x in",
"cardlistStr global dealerlistStr #defines lists and some random ints cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[]",
"'+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+\" and \"+(carddeck[c11])) if sum(cardlist)>21: for",
"if again2==('no'): crash() def tie(): global cardlist global dealerlist global cardlistStr global dealerlistStr",
"cardlistStr.append(carddeck[c5]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+ str(carddeck[c5])) if sum(cardlist)>(21): for x",
"beginning variables to their resepctive lists cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3])",
"sum(dealerlist)>sum(cardlist): lose() if sum(cardlist)>sum(dealerlist): win() if sum(dealerlist)==(21): if sum(dealerlist)==sum(cardlist): tie() else: lose() if",
"carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns values to the card names cardvalue={ 'A': 11, '2': 2, '3':",
"#this prints what your cards are at the start of the game print('Your",
"if again2==('yes'): blackjack() if again2==('no'): crash() if again2 != ('yes') or again2 !=",
"names cardvalue={ 'A': 11, '2': 2, '3': 3, '4': 4, '5': 5, '6':",
"wins, loses, or ties def standcheck(): if sum(dealerlist)<=(21): if sum(dealerlist)>sum(cardlist): lose() if sum(cardlist)>sum(dealerlist):",
"dealerlist[x]==(11): dealerlist[x]=(1) if sum(dealerlist)>(21): win() #This determines what move the dealer does when",
"+ \" times and lost \"+str(losex)+\" times\") print() print('you won :)') print(\"The dealer's",
"choice1==('stand'): stand() if choice1==('hit'): c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+",
"stand() if choice1==('hit'): c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+',",
"get over 21, you lose. stand to give the turn to the dealer,",
"print(\"The dealer's open card is \"+str(carddeck[c3])) #after the dealer finishes their turn, this",
"dealerlist global cardlistStr global dealerlistStr global losex losex=losex+1 print('you have won '+str(winx) +",
"#defines lists and values cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=0 c2=0 c3=0 c4=0 a=0",
"\") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('play again? ') if again2==('yes'): blackjack() if",
"c4=(random.randint(0,51)) #this prints what your cards are at the start of the game",
"checks who wins, loses, or ties def standcheck(): if sum(dealerlist)<=(21): if sum(dealerlist)>sum(cardlist): lose()",
"winx=0 losex=0 #defines the list for where I will take card names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K']",
"or tie def lose(): global cardlist global dealerlist global cardlistStr global dealerlistStr global",
"dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('you tied, play again? ') if again2==('yes'): blackjack() if again2==('no'):",
"if again2==('no'): print('ok') crash() if again2 != ('yes') or again2 != ('no'): again2=input('yes",
"cardlistStr=[] dealerlistStr=[] again=input('try again? ') if again==('yes'): blackjack() if again==('no'): crash() else: again=input('yes",
"while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or stand ') if choice1==('stand'): stand()",
"4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10,",
"stand() if choice1==('hit'): c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+',",
"cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck(c10))) if sum(cardlist)>21:",
"str(carddeck[c5])) if sum(cardlist)>(21): for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose()",
"if again2==('no'): crash() if again2 != ('yes') or again2 != ('no'): again2=input('yes or",
"lists global cardlist global dealerlist global cardlistStr global dealerlistStr #defines lists and some",
"cardlist global dealerlist global cardlistStr global dealerlistStr global winx winx=winx+1 print('you have won",
"'+str(carddeck[c2])+', and '+ str(carddeck[c5])) if sum(cardlist)>(21): for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1)",
"if sum(cardlist)==21: print('BLACKJACK') win() else: choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'):",
"cardlistStr.append(carddeck[c6]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', and '+(carddeck[c6])) if sum(cardlist)>21: for",
"switch, so here it is \") print() print(\"how to play: your goal is",
"and lost \"+str(losex)+\" times\") print() print('you won :)') print(\"The dealer's cards are \")",
"dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2]) standcheck() if sum(dealerlist)>(17): standcheck() #Adds all the beginning variables to",
"win(): global cardlist global dealerlist global cardlistStr global dealerlistStr global winx winx=winx+1 print('you",
"no') if again2==('yes'): blackjack() if again2==('no'): crash() def tie(): global cardlist global dealerlist",
"choice1!=('stand'): choice1=input('Pick either hit or stand ') if (cardlist)==(21): win() if choice1==('hit'): c5=random.randint(0,51)",
"'+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7])) if sum(cardlist)>21: for x in range(len(cardlist)): if",
"again? ') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() if again2 != ('yes')",
"choice1==('stand'): stand() if choice1==('hit'): c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+",
"'+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+\" and \"+(carddeck[c11])) if sum(cardlist)>21: for x in range(len(cardlist)):",
"standcheck() if sum(dealerlist)==sum(cardlist): standcheck() if sum(dealerlist)>sum(cardlist): lose() else: dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1]) while sum(dealerlist)<=(16):",
"to 21, and to beat the dealer. If you get over 21, you",
"and '+str(carddeck[c2])) print(\"The dealer's open card is \"+str(carddeck[c3])) #after the dealer finishes their",
"lists and values cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=0 c2=0 c3=0 c4=0 a=0 b=0",
"9, '10': 10, 'J': 10, 'Q': 10, 'K': 10 } #this function crashes",
"dealerlistStr global winx winx=winx+1 print('you have won '+str(winx) + \" times and lost",
"choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or stand ') if (cardlist)==(21): win() if",
"are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7])) if sum(cardlist)>21: for x in",
"winx winx=winx+1 print('you have won '+str(winx) + \" times and lost \"+str(losex)+\" times\")",
"cardlistStr.append(carddeck[c9]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9]))",
"draw a new card\") #defines lists and values cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=0",
"sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21:",
"if dealerlist[x]==(11): dealerlist[x]=(1) if sum(dealerlist)>(21): win() #This determines what move the dealer does",
"global dealerlistStr #defines lists and some random ints cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=(random.randint(0,51))",
"#Adds all the beginning variables to their resepctive lists cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]])",
"'10': 10, 'J': 10, 'Q': 10, 'K': 10 } #this function crashes python",
"or ties def standcheck(): if sum(dealerlist)<=(21): if sum(dealerlist)>sum(cardlist): lose() if sum(cardlist)>sum(dealerlist): win() if",
"hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8]) print('Your",
"\") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again=input('try again? ') if again==('yes'): blackjack() if",
"'+str(winx) + \"times and lost \"+str(losex)+\" times\") print() print('you lost :(') print(\"The dealer's",
"sum(dealerlist)>(17): standcheck() #Adds all the beginning variables to their resepctive lists cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]])",
"won '+str(winx) + \"times and lost \"+str(losex)+\" times\") print() print('you lost :(') print(\"The",
"global dealerlist global cardlistStr global dealerlistStr global winx winx=winx+1 print('you have won '+str(winx)",
"') if choice1==('stand'): stand() if choice1==('hit'): c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10]) print('Your cards are '+str(carddeck[c1])+',",
"again2=input('yes or no') if again2==('yes'): blackjack() if again2==('no'): crash() def tie(): global cardlist",
"again==('no'): crash() else: again=input('yes or no') if again==('yes'): blackjack() if again==('no'): crash() def",
"too late to switch, so here it is \") print() print(\"how to play:",
"sum(dealerlist)>sum(cardlist): lose() else: dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1]) while sum(dealerlist)<=(16): dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2]) standcheck() if",
"if choice1==('hit'): c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+',",
"list for where I will take card names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns values to the",
"dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1]) while sum(dealerlist)<=(16): dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2]) standcheck() if sum(dealerlist)>(17): standcheck() #Adds",
"blackjack() if again==('no'): crash() else: again=input('yes or no') if again==('yes'): blackjack() if again==('no'):",
"'+(carddeck[c8])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose()",
"(cardlist)==(21): win() if choice1==('hit'): c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and",
"if again==('yes'): blackjack() if again==('no'): crash() def win(): global cardlist global dealerlist global",
"if again==('yes'): blackjack() if again==('no'): crash() else: again=input('yes or no') if again==('yes'): blackjack()",
"game print('Your cards are '+str(carddeck[c1])+' and '+str(carddeck[c2])) print(\"The dealer's open card is \"+str(carddeck[c3]))",
"open card is \"+str(carddeck[c3])) #after the dealer finishes their turn, this code checks",
"card names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns values to the card names cardvalue={ 'A': 11, '2':",
"lost :(') print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards are \")",
"c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+ str(carddeck[c5])) if sum(cardlist)>(21):",
"'8': 8, '9': 9, '10': 10, 'J': 10, 'Q': 10, 'K': 10 }",
"if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() if sum(cardlist)<(21): choice1=input('Hit",
"dealerlist global cardlistStr global dealerlistStr #defines lists and some random ints cardlist=[] dealerlist=[]",
"is connected to, it isn't connected to any of them. \") print() print(\"I",
"print(\"how to play: your goal is to get your card total closest to",
"if sum(dealerlist)>(17): standcheck() #Adds all the beginning variables to their resepctive lists cardlist.append(cardvalue[carddeck[c1]])",
"any of them. \") print() print(\"I started making it, and I forgot it",
"print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('you tied, play again? ') if again2==('yes'): blackjack()",
"to get your card total closest to 21, and to beat the dealer.",
"'A': 11, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7':",
"stand() if choice1==('hit'): c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+',",
"'6': 6, '7': 7, '8': 8, '9': 9, '10': 10, 'J': 10, 'Q':",
"'+ str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11):",
"ints cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=(random.randint(0,51)) c2=(random.randint(0,51)) c3=(random.randint(0,51)) c4=(random.randint(0,51)) #this prints what your",
"dealerlistStr global losex losex=losex+1 print('you have won '+str(winx) + \"times and lost \"+str(losex)+\"",
"are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck(c10))) if sum(cardlist)>21: for",
"choice1==('hit'): c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', and",
"range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() else: choice1=input('Hit",
"'+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck(c10))) if sum(cardlist)>21: for x",
"hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6]) print('Your",
"or no') if again==('yes'): blackjack() if again==('no'): crash() def win(): global cardlist global",
"if choice1==('stand'): stand() if choice1==('hit'): c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+',",
"b=0 da=0 db=0 winx=0 losex=0 #defines the list for where I will take",
"some random ints cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=(random.randint(0,51)) c2=(random.randint(0,51)) c3=(random.randint(0,51)) c4=(random.randint(0,51)) #this prints",
"'+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11):",
"import random import time print(\"this game is blackjack. If you are wondering which",
"either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11])",
"'+(carddeck[c9])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose()",
"cardlistStr.append(carddeck[c7]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7])) if sum(cardlist)>21:",
"print('BLACKJACK') win() if sum(cardlist)<(21): choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick",
"started making it, and I forgot it had to be related to a",
"#This determines what move the dealer does when it is their turn def",
"blackjack. If you are wondering which focus day this is connected to, it",
"are at the start of the game print('Your cards are '+str(carddeck[c1])+' and '+str(carddeck[c2]))",
"and choice1!=('stand'): choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'):",
"prints what your cards are at the start of the game print('Your cards",
"lose() if sum(cardlist)>sum(dealerlist): win() if sum(dealerlist)==(21): if sum(dealerlist)==sum(cardlist): tie() else: lose() if sum(dealerlist)>(21):",
"or stand ') if (cardlist)==(21): win() if choice1==('hit'): c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5]) print('Your cards",
"'+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck(c10))) if sum(cardlist)>21: for x in",
"dealerlistStr=[] c1=0 c2=0 c3=0 c4=0 a=0 b=0 da=0 db=0 winx=0 losex=0 #defines the",
"\") print() print(\"I started making it, and I forgot it had to be",
"if again2==('yes'): blackjack() if again2==('no'): crash() def tie(): global cardlist global dealerlist global",
"x in range(len(dealerlist)): if dealerlist[x]==(11): dealerlist[x]=(1) if sum(dealerlist)>(21): win() #This determines what move",
"sum(dealerlist)>(17): standcheck() if sum(dealerlist)==sum(cardlist): standcheck() if sum(dealerlist)>sum(cardlist): lose() else: dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1]) while",
"c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', and '+(carddeck[c6])) if",
"str(carddeck[c5])+', and '+(carddeck[c6])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if",
"print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck(c10))) if",
"cards are \") print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[]",
"#defines lists and some random ints cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=(random.randint(0,51)) c2=(random.randint(0,51)) c3=(random.randint(0,51))",
"crash() #blakcjack funtion def blackjack(): #define lose, tie, win functions that happen when",
"stand() if choice1==('hit'): c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+',",
"determines what move the dealer does when it is their turn def stand():",
"lose() else: dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1]) while sum(dealerlist)<=(16): dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2]) standcheck() if sum(dealerlist)>(17):",
"global dealerlistStr print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards are \")",
"c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and",
"either hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8])",
"are '+str(carddeck[c1])+' and '+str(carddeck[c2])) print(\"The dealer's open card is \"+str(carddeck[c3])) #after the dealer",
"dealer, and hit to draw a new card\") #defines lists and values cardlist=[]",
"had to be related to a focus day, but it was too late",
"c2=0 c3=0 c4=0 a=0 b=0 da=0 db=0 winx=0 losex=0 #defines the list for",
":)') print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr)",
"cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() if sum(cardlist)<(21): choice1=input('Hit or stand?",
"the lists global cardlist global dealerlist global cardlistStr global dealerlistStr #defines lists and",
"late to switch, so here it is \") print() print(\"how to play: your",
"cardlistStr=[] dealerlistStr=[] again2=input('you tied, play again? ') if again2==('yes'): blackjack() if again2==('no'): print('ok')",
"3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,",
"I will take card names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns values to the card names cardvalue={",
"again2==('yes'): blackjack() if again2==('no'): print('ok') crash() #globals the lists global cardlist global dealerlist",
"you lose, win or tie def lose(): global cardlist global dealerlist global cardlistStr",
"crash() def win(): global cardlist global dealerlist global cardlistStr global dealerlistStr global winx",
"cardlistStr global dealerlistStr global winx winx=winx+1 print('you have won '+str(winx) + \" times",
"here it is \") print() print(\"how to play: your goal is to get",
"global cardlistStr global dealerlistStr #defines lists and some random ints cardlist=[] dealerlist=[] cardlistStr=[]",
"sum(cardlist)==21: print('BLACKJACK') win() if sum(cardlist)<(21): choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'):",
"card is \"+str(carddeck[c3])) #after the dealer finishes their turn, this code checks who",
"print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+\" and",
"cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() else: choice1=input('Hit or stand?",
"again? ') if again==('yes'): blackjack() if again==('no'): crash() else: again=input('yes or no') if",
"print('you lost :(') print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards are",
"their turn def stand(): if sum(dealerlist)>(17): standcheck() if sum(dealerlist)==sum(cardlist): standcheck() if sum(dealerlist)>sum(cardlist): lose()",
"tie(): global cardlist global dealerlist global cardlistStr global dealerlistStr print(\"The dealer's cards are",
"else: lose() if sum(dealerlist)>(21): for x in range(len(dealerlist)): if dealerlist[x]==(11): dealerlist[x]=(1) if sum(dealerlist)>(21):",
"are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8])) if sum(cardlist)>21: for x",
"happen when you lose, win or tie def lose(): global cardlist global dealerlist",
"cards are '+str(carddeck[c1])+' and '+str(carddeck[c2])) print(\"The dealer's open card is \"+str(carddeck[c3])) #after the",
"dealerlist global cardlistStr global dealerlistStr print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your",
"again2==('yes'): blackjack() if again2==('no'): crash() if again2 != ('yes') or again2 != ('no'):",
"cardvalue={ 'A': 11, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6,",
"blackjack() if again2==('no'): print('ok') crash() if again2 != ('yes') or again2 != ('no'):",
"crash(): try: crash() except: crash() #blakcjack funtion def blackjack(): #define lose, tie, win",
"to the dealer, and hit to draw a new card\") #defines lists and",
"dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=0 c2=0 c3=0 c4=0 a=0 b=0 da=0 db=0 winx=0 losex=0",
"cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4]) #asks w1=input('Hit or stand? ') choice1=input('Hit or stand? ')",
"print() print(\"how to play: your goal is to get your card total closest",
"choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or stand ') if choice1==('stand'): stand() if",
"print('you won :)') print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards are",
"win() #This determines what move the dealer does when it is their turn",
"again==('yes'): blackjack() if again==('no'): crash() def win(): global cardlist global dealerlist global cardlistStr",
"cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() if sum(cardlist)<(21): choice1=input('Hit or",
"if choice1==('hit'): c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+',",
"them. \") print() print(\"I started making it, and I forgot it had to",
"('no'): again2=input('yes or no') if again2==('yes'): blackjack() if again2==('no'): crash() def tie(): global",
"the list for where I will take card names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns values to",
"again? ') if again2==('yes'): blackjack() if again2==('no'): crash() if again2 != ('yes') or",
"if sum(dealerlist)==sum(cardlist): standcheck() if sum(dealerlist)>sum(cardlist): lose() else: dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1]) while sum(dealerlist)<=(16): dc2=(random.randint(0,51))",
"focus day, but it was too late to switch, so here it is",
"this is connected to, it isn't connected to any of them. \") print()",
"str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1)",
"') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or stand ') if choice1==('stand'):",
"c3=(random.randint(0,51)) c4=(random.randint(0,51)) #this prints what your cards are at the start of the",
"choice1==('hit'): c9=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c9]]) cardlistStr.append(carddeck[c9]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+',",
"or again2 != ('no'): again2=input('yes or no') if again2==('yes'): blackjack() if again2==('no'): crash()",
"if choice1==('stand'): stand() if choice1==('hit'): c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+',",
"'+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+\" and \"+(carddeck[c11])) if sum(cardlist)>21: for x",
"import time print(\"this game is blackjack. If you are wondering which focus day",
"blackjack() if again2==('no'): print('ok') crash() #globals the lists global cardlist global dealerlist global",
"which focus day this is connected to, it isn't connected to any of",
"db=0 winx=0 losex=0 #defines the list for where I will take card names",
"'+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+\" and \"+(carddeck[c11])) if sum(cardlist)>21:",
"turn def stand(): if sum(dealerlist)>(17): standcheck() if sum(dealerlist)==sum(cardlist): standcheck() if sum(dealerlist)>sum(cardlist): lose() else:",
"'+(carddeck[c8])+', and '+(carddeck[c9])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if",
"won '+str(winx) + \" times and lost \"+str(losex)+\" times\") print() print('you won :)')",
"if sum(dealerlist)<=(21): if sum(dealerlist)>sum(cardlist): lose() if sum(cardlist)>sum(dealerlist): win() if sum(dealerlist)==(21): if sum(dealerlist)==sum(cardlist): tie()",
"hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c10=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c10]]) cardlistStr.append(carddeck[c10]) print('Your",
"or again2 != ('no'): again2=input('yes or no') if again2==('yes'): blackjack() if again2==('no'): print('ok')",
"choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit or stand",
"give the turn to the dealer, and hit to draw a new card\")",
"to, it isn't connected to any of them. \") print() print(\"I started making",
"total closest to 21, and to beat the dealer. If you get over",
"dealer finishes their turn, this code checks who wins, loses, or ties def",
"'9': 9, '10': 10, 'J': 10, 'Q': 10, 'K': 10 } #this function",
"or stand ') if choice1==('stand'): stand() if choice1==('hit'): c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6]) print('Your cards",
"does when it is their turn def stand(): if sum(dealerlist)>(17): standcheck() if sum(dealerlist)==sum(cardlist):",
"blackjack(): #define lose, tie, win functions that happen when you lose, win or",
"hit or stand ') if (cardlist)==(21): win() if choice1==('hit'): c5=random.randint(0,51) cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5]) print('Your",
"#asks w1=input('Hit or stand? ') choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'):",
"standcheck() if sum(dealerlist)>sum(cardlist): lose() else: dc1=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc1]]) dealerlistStr.append(carddeck[dc1]) while sum(dealerlist)<=(16): dc2=(random.randint(0,51)) dealerlist.append(cardvalue[carddeck[dc2]]) dealerlistStr.append(carddeck[dc2])",
"random import time print(\"this game is blackjack. If you are wondering which focus",
"cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8])) if sum(cardlist)>21: for",
"times\") print() print('you lost :(') print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your",
"'+ str(carddeck[c5])) if sum(cardlist)>(21): for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21):",
"choice1==('stand'): stand() if choice1==('hit'): c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+",
"!= ('no'): again2=input('yes or no') if again2==('yes'): blackjack() if again2==('no'): crash() def tie():",
"card names cardvalue={ 'A': 11, '2': 2, '3': 3, '4': 4, '5': 5,",
"') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() if again2 != ('yes') or",
"') if choice1==('stand'): stand() if choice1==('hit'): c6=random.randint(0,51) cardlist.append(cardvalue[carddeck[c6]]) cardlistStr.append(carddeck[c6]) print('Your cards are '+str(carddeck[c1])+',",
"if sum(dealerlist)==(21): if sum(dealerlist)==sum(cardlist): tie() else: lose() if sum(dealerlist)>(21): for x in range(len(dealerlist)):",
"c1=0 c2=0 c3=0 c4=0 a=0 b=0 da=0 db=0 winx=0 losex=0 #defines the list",
"losex losex=losex+1 print('you have won '+str(winx) + \"times and lost \"+str(losex)+\" times\") print()",
"it is their turn def stand(): if sum(dealerlist)>(17): standcheck() if sum(dealerlist)==sum(cardlist): standcheck() if",
"cardlist.append(cardvalue[carddeck[c5]]) cardlistStr.append(carddeck[c5]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+ str(carddeck[c5])) if sum(cardlist)>(21): for",
"if again2==('no'): print('ok') crash() #globals the lists global cardlist global dealerlist global cardlistStr",
"take card names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns values to the card names cardvalue={ 'A': 11,",
"to the card names cardvalue={ 'A': 11, '2': 2, '3': 3, '4': 4,",
"stand() if choice1==('hit'): c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+',",
"'+(carddeck[c9])+', '+(carddeck[c10])+\" and \"+(carddeck[c11])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1)",
"cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() if choice1==('stand'): stand() #a",
"dealerlistStr=[] again2=input('you tied, play again? ') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash()",
"to their resepctive lists cardlist.append(cardvalue[carddeck[c1]]) cardlist.append(cardvalue[carddeck[c2]]) dealerlist.append(cardvalue[carddeck[c3]]) dealerlist.append(cardvalue[carddeck[c4]]) cardlistStr.append(carddeck[c1]) cardlistStr.append(carddeck[c2]) dealerlistStr.append(carddeck[c3]) dealerlistStr.append(carddeck[c4]) #asks",
"lost \"+str(losex)+\" times\") print() print('you won :)') print(\"The dealer's cards are \") print(dealerlistStr)",
"is to get your card total closest to 21, and to beat the",
"print(\"your cards are \") print(cardlistStr) cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] again2=input('you tied, play again?",
":(') print(\"The dealer's cards are \") print(dealerlistStr) print() print(\"your cards are \") print(cardlistStr)",
"') if again==('yes'): blackjack() if again==('no'): crash() else: again=input('yes or no') if again==('yes'):",
"if sum(cardlist)<(21): choice1=input('Hit or stand? ') while choice1!=('hit') and choice1!=('stand'): choice1=input('Pick either hit",
"21, and to beat the dealer. If you get over 21, you lose.",
"dealerlistStr=[] c1=(random.randint(0,51)) c2=(random.randint(0,51)) c3=(random.randint(0,51)) c4=(random.randint(0,51)) #this prints what your cards are at the",
"c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7]) print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7]))",
"hit or stand ') if choice1==('stand'): stand() if choice1==('hit'): c11=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c11]]) cardlistStr.append(carddeck[c11]) print('Your",
"related to a focus day, but it was too late to switch, so",
"again2=input('yes or no') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() #globals the lists",
"hit to draw a new card\") #defines lists and values cardlist=[] dealerlist=[] cardlistStr=[]",
"a focus day, but it was too late to switch, so here it",
"tie() else: lose() if sum(dealerlist)>(21): for x in range(len(dealerlist)): if dealerlist[x]==(11): dealerlist[x]=(1) if",
"be related to a focus day, but it was too late to switch,",
"random ints cardlist=[] dealerlist=[] cardlistStr=[] dealerlistStr=[] c1=(random.randint(0,51)) c2=(random.randint(0,51)) c3=(random.randint(0,51)) c4=(random.randint(0,51)) #this prints what",
"beat the dealer. If you get over 21, you lose. stand to give",
"is \"+str(carddeck[c3])) #after the dealer finishes their turn, this code checks who wins,",
"'+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+\" and \"+(carddeck[c11])) if sum(cardlist)>21: for x in range(len(cardlist)): if",
"'J': 10, 'Q': 10, 'K': 10 } #this function crashes python def crash():",
"'+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck(c10))) if sum(cardlist)>21: for x in range(len(cardlist)):",
"'+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1)",
"'+str(winx) + \" times and lost \"+str(losex)+\" times\") print() print('you won :)') print(\"The",
"the dealer, and hit to draw a new card\") #defines lists and values",
"or stand ') if choice1==('stand'): stand() if choice1==('hit'): c8=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c8]]) cardlistStr.append(carddeck[c8]) print('Your cards",
"\"+str(losex)+\" times\") print() print('you lost :(') print(\"The dealer's cards are \") print(dealerlistStr) print()",
"if again==('no'): crash() def win(): global cardlist global dealerlist global cardlistStr global dealerlistStr",
"global dealerlistStr global losex losex=losex+1 print('you have won '+str(winx) + \"times and lost",
"lost \"+str(losex)+\" times\") print() print('you lost :(') print(\"The dealer's cards are \") print(dealerlistStr)",
"if sum(dealerlist)>(21): win() #This determines what move the dealer does when it is",
"dealerlistStr=[] again2=input('play again? ') if again2==('yes'): blackjack() if again2==('no'): crash() if again2 !=",
"again2=input('you tied, play again? ') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() if",
"'+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck(c10))) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11):",
"') if choice1==('stand'): stand() if choice1==('hit'): c7=(random.randint(0,51)) cardlist.append(cardvalue[carddeck[c7]]) cardlistStr.append(carddeck[c7]) print('Your cards are '+str(carddeck[c1])+',",
"cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK') win() else: choice1=input('Hit or stand? ')",
"and choice1!=('stand'): choice1=input('Pick either hit or stand ') if (cardlist)==(21): win() if choice1==('hit'):",
"losex=0 #defines the list for where I will take card names carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K'] #Assigns",
"funtion def blackjack(): #define lose, tie, win functions that happen when you lose,",
"('no'): again2=input('yes or no') if again2==('yes'): blackjack() if again2==('no'): print('ok') crash() #globals the",
"and \"+(carddeck[c11])) if sum(cardlist)>21: for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21):",
"blackjack() if again2==('no'): crash() def tie(): global cardlist global dealerlist global cardlistStr global",
"') if again2==('yes'): blackjack() if again2==('no'): crash() if again2 != ('yes') or again2",
"for x in range(len(cardlist)): if cardlist[x]==(11): cardlist[x]=(1) if sum(cardlist)>(21): lose() if sum(cardlist)==21: print('BLACKJACK')"
] |
[
"Invalid todo - [x] Ivalid todo \"\"\", ), ( \"\"\"\\ - [ ]",
"local as Path from notesystem.modes.base_mode import ModeOptions from notesystem.modes.check_mode.check_mode import CheckMode from notesystem.modes.check_mode.check_mode",
"= { 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start) main(['check',",
"{ 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_multiple_flags(",
"): \"\"\"Test that when given a filepath only _check_file is called\"\"\" # Some",
"_check_dir.assert_not_called() # Test that fix is called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock): \"\"\"Test that",
"= check_mode._check_dir('tests/test_documents') assert len(errors) == 3 def test_check_mode_check_file_returns(): \"\"\"Test that _check_file checks the",
"# but they don'y have access so they raise value error # Which",
"0 else: assert len(doc_errors['errors']) > 0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def test_simple_errors_is_passed_through_correctly(mock: Mock): # Default check",
"# Test disabling errors # Test passing the flag # Passing the flag",
"assert error['error_type'].get_error_name() not in disabled_errors if valid: assert len(doc_errors['errors']) == 0 else: assert",
"[x] Ivalid todo \"\"\", TodoError.get_error_name(), True, ), ( \"\"\" There is $$invalid$$ math",
"that when given a filepath only _check_file is called\"\"\" # Some parts need",
"mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def test_check_mode_checks_dir_when_given_dir(mock: Mock): \"\"\"Test that when given a directory path, _check_dir",
"= { 'in_path': 'tests/test_documents', 'fix': False, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions",
"c1 = file.read() assert c1 == good # Test disabling errors # Test",
"raise value error # Which can be ignored try: main(['check', 'tests/test_documents/ast_error_test_1.md']) except ValueError:",
"returns as much doc errors as are present in the folder TODO: Make",
"except ValueError: pass # _check_file should be called with the filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') #",
"error # When an error is in _disabled_errors is should not be found",
"a directory path, _check_dir is called\"\"\" main(['check', 'tests/test_documents']) mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def test_check_mode_checks_file_when_given_file(",
"ModeOptions = { 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start')",
"ModeOptions = { 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start')",
"main(['check', 'tests/test_documents']) mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def test_check_mode_checks_file_when_given_file( _check_file: Mock, _check_dir: Mock, ): \"\"\"Test",
"[ ] Invalid todo There is $$invalid$$ math in this line \"\"\", [MathError.get_error_name(),",
"so they raise value error # Which can be ignored try: main(['check', 'tests/test_documents/ast_error_test_1.md'])",
"'fix': False, 'disabled_errors': [TodoError.get_error_name()], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True,",
"only is correct $math$ in this line There is one $correct$ and one",
"when the input is not a dir\"\"\" check_mode = CheckMode() # Totally invalid",
"disabling errors # Test passing the flag # Passing the flag should result",
"expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [TodoError.get_error_name()], 'simple_errors': False, }",
"# Passing the flag should result in the _disabled_errors being set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def",
"Should be good - [x] Deff is good \"\"\", ), ], ) def",
"that _fix_doc_errors is called when fixing is enabled\"\"\" try: main(['check', 'tests/test_documents/ast_error_test_1.md', '-f']) except",
"= tmpdir.join('test.md') file.write(file_contents) check_mode = CheckMode() check_mode._disabled_errors = disabled_errors doc_errors = check_mode._check_file(file.strpath) for",
"( \"\"\"\\ [ ] Invalid todo [x] Ivalid todo \"\"\", \"\"\"\\ - [",
"= { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [TodoError.get_error_name()], 'simple_errors': False, } expected_options: ModeOptions",
"unittest.mock import patch import pytest from py.path import local as Path from notesystem.modes.base_mode",
"start() # which is not run in this test check_mode._disabled_errors = [] errors",
"$$invalid$$ math in this line There only is correct $math$ in this line",
"'fix': False, 'disabled_errors': [ MathError.get_error_name(), TodoError.get_error_name(), ], 'simple_errors': False, } expected_options: ModeOptions =",
"filepath \"\"\" check_mode = CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file('tests/test_documents/contains_errors.md') assert errors['errors']",
"= { 'in_path': 'tests/test_documents', 'fix': True, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions",
"= [] errors = check_mode._check_file('tests/test_documents/contains_errors.md') assert errors['errors'] is not None assert errors['file_path'] ==",
"main(['check', 'no dir']) def test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test that _check_dir raises when the input is",
"patch import pytest from py.path import local as Path from notesystem.modes.base_mode import ModeOptions",
"$math$ in this line There is one $correct$ and one $$wrong$$ math block",
"import CheckMode from notesystem.modes.check_mode.check_mode import CheckModeArgs from notesystem.modes.check_mode.errors.markdown_errors import MathError from notesystem.modes.check_mode.errors.markdown_errors import",
"good - [x] Deff is good \"\"\", ), ], ) def test_check_mode_fix_file(tmpdir, wrong,",
"from notesystem.modes.check_mode.errors.markdown_errors import TodoError from notesystem.notesystem import main def test_required_arguments(): \"\"\"Does the check",
"_check_file: Mock, _check_dir: Mock, ): \"\"\"Test that when given a filepath only _check_file",
"# Test passing the flag # Passing the flag should result in the",
"def test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start: Mock, ): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', '--disable-math-error', ]) expected_args: CheckModeArgs",
"SystemExit is raised\"\"\" with pytest.raises(SystemExit): main(['check', 'no dir']) def test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test that _check_dir",
"given a directory path, _check_dir is called\"\"\" main(['check', 'tests/test_documents']) mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def",
"should not be called _check_dir.assert_not_called() # Test that fix is called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def",
"test_check_mode_check_dir_returns(): \"\"\"Test that check_mode dirs returns as much doc errors as are present",
"in this test check_mode._disabled_errors = [] errors = check_mode._check_dir('tests/test_documents') assert len(errors) == 3",
"check_mode._disabled_errors = disabled_errors doc_errors = check_mode._check_file(file.strpath) for error in doc_errors['errors']: assert error['error_type'].get_error_name() not",
"don'y have access so they raise value error # Which can be ignored",
"parts need access to the terminal, # but they don'y have access so",
"> 0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def test_simple_errors_is_passed_through_correctly(mock: Mock): # Default check -- should be disabled",
"_check_dir is called\"\"\" main(['check', 'tests/test_documents']) mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def test_check_mode_checks_file_when_given_file( _check_file: Mock, _check_dir:",
"an error is in _disabled_errors is should not be found # in a",
"called with the filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') # Check dir should not be called _check_dir.assert_not_called()",
"Only disable todo errors False, ), ], ) def test_check_mode_disbled_errors_are_not_returned( tmpdir: Path, file_contents:",
"Mock): \"\"\"Tests that the correct arguments are passed to check mode with only",
"todo \"\"\", \"\"\"\\ - [ ] Invalid todo - [x] Ivalid todo \"\"\",",
"main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False,",
"input path \"\"\" main(['check', 'tests/test_documents']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': False,",
"TodoError.get_error_name(), ], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True, 'args': expected_args,",
"path, _check_dir is called\"\"\" main(['check', 'tests/test_documents']) mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def test_check_mode_checks_file_when_given_file( _check_file: Mock,",
"CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [TodoError.get_error_name()], 'simple_errors': False, } expected_options:",
"correct arguments are passed to check mode with a input path and fixing",
"a document that contains the disabled error @pytest.mark.parametrize( 'file_contents,disabled_errors,valid', [ ( \"\"\"\\ [",
"input is not a dir\"\"\" check_mode = CheckMode() # Totally invalid dir with",
"# Which can be ignored try: main(['check', 'tests/test_documents/ast_error_test_1.md']) except ValueError: pass # _check_file",
"def test_required_arguments(): \"\"\"Does the check mode fail without in path\"\"\" with pytest.raises(SystemExit): main(['check'])",
"'in_path', '--simple-errors')) assert mock.call_args.args[0]['simple_errors'] == True @patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error') def test_print_simple_doc_error_is_called(mock: Mock): main(['check', 'tests/test_documents', '--simple-errors'])",
"should be called with the filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') # Check dir should not be",
"good \"\"\", \"\"\"\\ - [ ] Should be good - [x] Deff is",
"'file_contents,disabled_errors,valid', [ ( \"\"\"\\ [ ] Invalid todo [x] Ivalid todo \"\"\", TodoError.get_error_name(),",
"\"\"\"\\ [ ] Invalid todo [x] Ivalid todo \"\"\", TodoError.get_error_name(), True, ), (",
"# Some parts need access to the terminal, # but they don'y have",
"_check_file is called\"\"\" # Some parts need access to the terminal, # but",
"'tests/test_documents', 'fix': False, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions = { 'visual':",
"check_mode._check_dir('tests/test_documents') assert len(errors) == 3 def test_check_mode_check_file_returns(): \"\"\"Test that _check_file checks the file",
"to check mode with a input path and fixing enabled \"\"\" main(['check', 'tests/test_documents',",
"Make test independent of test/test_documents file amount \"\"\" check_mode = CheckMode() # Set",
"disabled_errors: List[str], valid: bool, ): file = tmpdir.join('test.md') file.write(file_contents) check_mode = CheckMode() check_mode._disabled_errors",
"[x] Ivalid todo \"\"\", \"\"\"\\ - [ ] Invalid todo - [x] Ivalid",
"one $correct$ and one $$wrong$$ math block \"\"\", [MathError.get_error_name()], True, ), ( \"\"\"\\",
"# With filepath with pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def test_check_mode_check_dir_returns(): \"\"\"Test that check_mode dirs returns",
"is enabled\"\"\" try: main(['check', 'tests/test_documents/ast_error_test_1.md', '-f']) except ValueError: pass _fix_doc_errors.assert_called() # Test errors",
"that _check_dir raises when the input is not a dir\"\"\" check_mode = CheckMode()",
"mode fail without in path\"\"\" with pytest.raises(SystemExit): main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock): \"\"\"Tests",
"False, ), ], ) def test_check_mode_disbled_errors_are_not_returned( tmpdir: Path, file_contents: str, disabled_errors: List[str], valid:",
"'-f']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': True, 'disabled_errors': [], 'simple_errors': False,",
"main(['check', 'tests/test_documents', '--fix']) mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def test_check_mode_checks_dir_when_given_dir(mock: Mock): \"\"\"Test that when given a",
"value error # Which can be ignored try: main(['check', 'tests/test_documents/ast_error_test_1.md']) except ValueError: pass",
"_fix_doc_errors is called when fixing is enabled\"\"\" try: main(['check', 'tests/test_documents/ast_error_test_1.md', '-f']) except ValueError:",
"= CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors) c1 = file.read() assert",
"the flag should result in the _disabled_errors being set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock):",
"'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [ MathError.get_error_name(), TodoError.get_error_name(), ], 'simple_errors': False, } expected_options: ModeOptions",
"the correct arguments are passed to check mode with only a input path",
"error @pytest.mark.parametrize( 'file_contents,disabled_errors,valid', [ ( \"\"\"\\ [ ] Invalid todo [x] Ivalid todo",
"main(('check', 'in_path')) assert mock.call_args.args[0]['simple_errors'] == False # Enabled check main(('check', 'in_path', '--simple-errors')) assert",
"mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start) main(['check', 'tests/test_documents', '--fix']) mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def test_check_mode_checks_dir_when_given_dir(mock: Mock): \"\"\"Test that when",
"_disabled_errors is should not be found # in a document that contains the",
"] Invalid todo [x] Ivalid todo \"\"\", TodoError.get_error_name(), True, ), ( \"\"\" There",
"'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock): \"\"\"Tests that",
"is given SystemExit is raised\"\"\" with pytest.raises(SystemExit): main(['check', 'no dir']) def test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test",
"Path, file_contents: str, disabled_errors: List[str], valid: bool, ): file = tmpdir.join('test.md') file.write(file_contents) check_mode",
"Deff is good \"\"\", \"\"\"\\ - [ ] Should be good - [x]",
"else: assert len(doc_errors['errors']) > 0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def test_simple_errors_is_passed_through_correctly(mock: Mock): # Default check --",
"# which is not run in this test check_mode._disabled_errors = [] errors =",
"{ 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start:",
"dir with pytest.raises(NotADirectoryError): check_mode._check_dir('not a dir') # With filepath with pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def",
"check mode with a input path and fixing enabled \"\"\" main(['check', 'tests/test_documents', '-f'])",
"import ModeOptions from notesystem.modes.check_mode.check_mode import CheckMode from notesystem.modes.check_mode.check_mode import CheckModeArgs from notesystem.modes.check_mode.errors.markdown_errors import",
"\"\"\"Test that _check_file checks the file and returns errors and the correct filepath",
"'disabled_errors': [TodoError.get_error_name()], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True, 'args': expected_args,",
"{ 'in_path': 'tests/test_documents', 'fix': True, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions =",
"@pytest.mark.parametrize( 'file_contents,disabled_errors,valid', [ ( \"\"\"\\ [ ] Invalid todo [x] Ivalid todo \"\"\",",
"test_check_mode_disbled_errors_are_not_returned( tmpdir: Path, file_contents: str, disabled_errors: List[str], valid: bool, ): file = tmpdir.join('test.md')",
"valid: assert len(doc_errors['errors']) == 0 else: assert len(doc_errors['errors']) > 0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def test_simple_errors_is_passed_through_correctly(mock:",
"can be ignored try: main(['check', 'tests/test_documents/ast_error_test_1.md']) except ValueError: pass # _check_file should be",
"Mock from unittest.mock import patch import pytest from py.path import local as Path",
"expected_options: ModeOptions = { 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options)",
"@patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', ]) expected_args: CheckModeArgs = {",
"= disabled_errors doc_errors = check_mode._check_file(file.strpath) for error in doc_errors['errors']: assert error['error_type'].get_error_name() not in",
"the file and returns errors and the correct filepath \"\"\" check_mode = CheckMode()",
"] Invalid todo [x] Ivalid todo \"\"\", \"\"\"\\ - [ ] Invalid todo",
"True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) # Test the actual disabling",
"be good - [x] Deff is good \"\"\", \"\"\"\\ - [ ] Should",
"passed to check mode with a input path and fixing enabled \"\"\" main(['check',",
"} mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start) main(['check', 'tests/test_documents', '--fix']) mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def test_check_mode_checks_dir_when_given_dir(mock: Mock): \"\"\"Test that",
"path is given SystemExit is raised\"\"\" with pytest.raises(SystemExit): main(['check', 'no dir']) def test_check_mode_check_dir_raises_with_file_and_not_existing_dir():",
"notesystem.modes.check_mode.errors.markdown_errors import TodoError from notesystem.notesystem import main def test_required_arguments(): \"\"\"Does the check mode",
"# Set the _disabled_errros manually, because it is set in start() # which",
"= { 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def",
"in this line There only is correct $math$ in this line There is",
"assert len(doc_errors['errors']) == 0 else: assert len(doc_errors['errors']) > 0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def test_simple_errors_is_passed_through_correctly(mock: Mock):",
"'disabled_errors': [ MathError.get_error_name(), TodoError.get_error_name(), ], 'simple_errors': False, } expected_options: ModeOptions = { 'visual':",
"file = tmpdir.join('test.md') file.write(file_contents) check_mode = CheckMode() check_mode._disabled_errors = disabled_errors doc_errors = check_mode._check_file(file.strpath)",
"file.write(wrong) check_mode = CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors) c1 =",
"'fix': False, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True,",
"List[str], valid: bool, ): file = tmpdir.join('test.md') file.write(file_contents) check_mode = CheckMode() check_mode._disabled_errors =",
"is in _disabled_errors is should not be found # in a document that",
"# Default check -- should be disabled (false) main(('check', 'in_path')) assert mock.call_args.args[0]['simple_errors'] ==",
"False, } expected_options: ModeOptions = { 'visual': True, 'args': expected_args, # type: ignore",
"main(['check', 'tests/test_documents', '-f']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': True, 'disabled_errors': [],",
"<reponame>twanh/note-system import os from typing import List from unittest.mock import Mock from unittest.mock",
"Test that fix is called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock): \"\"\"Test that _fix_doc_errors is",
"import os from typing import List from unittest.mock import Mock from unittest.mock import",
"\"\"\", \"\"\"\\ - [ ] Should be good - [x] Deff is good",
"# When an error is in _disabled_errors is should not be found #",
"be ignored try: main(['check', 'tests/test_documents/ast_error_test_1.md']) except ValueError: pass # _check_file should be called",
"- [ ] Should be good - [x] Deff is good \"\"\", \"\"\"\\",
"check mode with only a input path \"\"\" main(['check', 'tests/test_documents']) expected_args: CheckModeArgs =",
"that contains the disabled error @pytest.mark.parametrize( 'file_contents,disabled_errors,valid', [ ( \"\"\"\\ [ ] Invalid",
"check_mode._fix_doc_errors(errors) c1 = file.read() assert c1 == good # Test disabling errors #",
"folder TODO: Make test independent of test/test_documents file amount \"\"\" check_mode = CheckMode()",
"directory path, _check_dir is called\"\"\" main(['check', 'tests/test_documents']) mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def test_check_mode_checks_file_when_given_file( _check_file:",
"There is one $correct$ and one $$wrong$$ math block \"\"\", [MathError.get_error_name()], True, ),",
"check_mode dirs returns as much doc errors as are present in the folder",
"errors = check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors) c1 = file.read() assert c1 == good # Test",
"Test disabling errors # Test passing the flag # Passing the flag should",
"this line There is one $correct$ and one $$wrong$$ math block \"\"\", [MathError.get_error_name()],",
"} mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are passed",
"is called\"\"\" main(['check', 'tests/test_documents']) mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def test_check_mode_checks_file_when_given_file( _check_file: Mock, _check_dir: Mock,",
"ignore } mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are",
"the folder TODO: Make test independent of test/test_documents file amount \"\"\" check_mode =",
"are passed to check mode with a input path and fixing enabled \"\"\"",
"# Check dir should not be called _check_dir.assert_not_called() # Test that fix is",
"), ( \"\"\"\\ - [ ] Should be good - [x] Deff is",
"should not be found # in a document that contains the disabled error",
"dir should not be called _check_dir.assert_not_called() # Test that fix is called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors')",
"only _check_file is called\"\"\" # Some parts need access to the terminal, #",
"notesystem.modes.base_mode import ModeOptions from notesystem.modes.check_mode.check_mode import CheckMode from notesystem.modes.check_mode.check_mode import CheckModeArgs from notesystem.modes.check_mode.errors.markdown_errors",
"len(doc_errors['errors']) == 0 else: assert len(doc_errors['errors']) > 0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def test_simple_errors_is_passed_through_correctly(mock: Mock): #",
"document that contains the disabled error @pytest.mark.parametrize( 'file_contents,disabled_errors,valid', [ ( \"\"\"\\ [ ]",
"raised\"\"\" with pytest.raises(SystemExit): main(['check', 'no dir']) def test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test that _check_dir raises when",
"\"\"\"\\ [ ] Invalid todo [x] Ivalid todo \"\"\", \"\"\"\\ - [ ]",
"todo \"\"\", ), ( \"\"\"\\ - [ ] Should be good - [x]",
"@pytest.mark.parametrize( 'wrong,good', [ ( \"\"\"\\ [ ] Invalid todo [x] Ivalid todo \"\"\",",
"Passing the flag should result in the _disabled_errors being set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start:",
"True, ), ( \"\"\" There is $$invalid$$ math in this line There only",
"in this line There is one $correct$ and one $$wrong$$ math block \"\"\",",
"line There is one $correct$ and one $$wrong$$ math block \"\"\", [MathError.get_error_name()], True,",
"math in this line \"\"\", [MathError.get_error_name(), TodoError.get_error_name()], True, ), ( \"\"\"\\ [ ]",
"check_mode = CheckMode() # Totally invalid dir with pytest.raises(NotADirectoryError): check_mode._check_dir('not a dir') #",
"tmpdir.join('test.md') file.write(wrong) check_mode = CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors) c1",
"not run in this test check_mode._disabled_errors = [] errors = check_mode._check_dir('tests/test_documents') assert len(errors)",
"# Only disable todo errors False, ), ], ) def test_check_mode_disbled_errors_are_not_returned( tmpdir: Path,",
"assert len(errors) == 3 def test_check_mode_check_file_returns(): \"\"\"Test that _check_file checks the file and",
"There is $$invalid$$ math in this line \"\"\", [MathError.get_error_name(), TodoError.get_error_name()], True, ), (",
"# type: ignore } mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start) main(['check', 'tests/test_documents', '--fix']) mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def test_check_mode_checks_dir_when_given_dir(mock:",
"[ ] Invalid todo [x] Ivalid todo \"\"\", \"\"\"\\ - [ ] Invalid",
"(false) main(('check', 'in_path')) assert mock.call_args.args[0]['simple_errors'] == False # Enabled check main(('check', 'in_path', '--simple-errors'))",
"[ ] Invalid todo - [x] Ivalid todo \"\"\", ), ( \"\"\"\\ -",
"the correct filepath \"\"\" check_mode = CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file('tests/test_documents/contains_errors.md')",
"math in this line There only is correct $math$ in this line There",
"called when fixing is enabled\"\"\" try: main(['check', 'tests/test_documents/ast_error_test_1.md', '-f']) except ValueError: pass _fix_doc_errors.assert_called()",
"check_mode = CheckMode() check_mode._disabled_errors = disabled_errors doc_errors = check_mode._check_file(file.strpath) for error in doc_errors['errors']:",
"expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start: Mock, ): main([",
"they raise value error # Which can be ignored try: main(['check', 'tests/test_documents/ast_error_test_1.md']) except",
"from notesystem.modes.base_mode import ModeOptions from notesystem.modes.check_mode.check_mode import CheckMode from notesystem.modes.check_mode.check_mode import CheckModeArgs from",
"\"\"\", [TodoError.get_error_name()], # Only disable todo errors False, ), ], ) def test_check_mode_disbled_errors_are_not_returned(",
"test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start: Mock, ): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', '--disable-math-error', ]) expected_args: CheckModeArgs =",
"expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': False, 'disabled_errors': [], 'simple_errors': False, }",
"# Test that fix is called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock): \"\"\"Test that _fix_doc_errors",
"in the _disabled_errors being set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo',",
"todo [x] Ivalid todo \"\"\", TodoError.get_error_name(), True, ), ( \"\"\" There is $$invalid$$",
"@patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def test_check_mode_checks_dir_when_given_dir(mock: Mock): \"\"\"Test that when given a directory path, _check_dir is",
"bool, ): file = tmpdir.join('test.md') file.write(file_contents) check_mode = CheckMode() check_mode._disabled_errors = disabled_errors doc_errors",
"'-f']) except ValueError: pass _fix_doc_errors.assert_called() # Test errors def test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test that when",
"doc errors as are present in the folder TODO: Make test independent of",
"with pytest.raises(SystemExit): main(['check', 'no dir']) def test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test that _check_dir raises when the",
"set in start() # which is not run in this test check_mode._disabled_errors =",
"expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': True, 'disabled_errors': [], 'simple_errors': False, }",
"TODO: Make test independent of test/test_documents file amount \"\"\" check_mode = CheckMode() #",
"only a input path \"\"\" main(['check', 'tests/test_documents']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents',",
"mock_check_mode_start.assert_called_once_with(expected_options) # Test the actual disabling of the error # When an error",
"Should be good - [x] Deff is good \"\"\", \"\"\"\\ - [ ]",
"Invalid todo There is $$invalid$$ math in this line \"\"\", [TodoError.get_error_name()], # Only",
"), ], ) def test_check_mode_fix_file(tmpdir, wrong, good): file = tmpdir.join('test.md') file.write(wrong) check_mode =",
"= [] errors = check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors) c1 = file.read() assert c1 == good",
"'simple_errors': False, } expected_options: ModeOptions = { 'visual': True, 'args': expected_args, # type:",
"run in this test check_mode._disabled_errors = [] errors = check_mode._check_dir('tests/test_documents') assert len(errors) ==",
"a input path and fixing enabled \"\"\" main(['check', 'tests/test_documents', '-f']) expected_args: CheckModeArgs =",
"try: main(['check', 'tests/test_documents/ast_error_test_1.md']) except ValueError: pass # _check_file should be called with the",
"called\"\"\" main(['check', 'tests/test_documents']) mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def test_check_mode_checks_file_when_given_file( _check_file: Mock, _check_dir: Mock, ):",
"= CheckMode() check_mode._disabled_errors = disabled_errors doc_errors = check_mode._check_file(file.strpath) for error in doc_errors['errors']: assert",
"errors['file_path'] == 'tests/test_documents/contains_errors.md' @pytest.mark.parametrize( 'wrong,good', [ ( \"\"\"\\ [ ] Invalid todo [x]",
"[ ] Should be good - [x] Deff is good \"\"\", ), ],",
"len(doc_errors['errors']) > 0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def test_simple_errors_is_passed_through_correctly(mock: Mock): # Default check -- should be",
"in the folder TODO: Make test independent of test/test_documents file amount \"\"\" check_mode",
"todo There is $$invalid$$ math in this line \"\"\", [MathError.get_error_name(), TodoError.get_error_name()], True, ),",
"should result in the _disabled_errors being set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock): main([ 'check',",
"from py.path import local as Path from notesystem.modes.base_mode import ModeOptions from notesystem.modes.check_mode.check_mode import",
"called\"\"\" # Some parts need access to the terminal, # but they don'y",
"- [ ] Invalid todo - [x] Ivalid todo \"\"\", ), ( \"\"\"\\",
"= { 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) # Test",
"the disabled error @pytest.mark.parametrize( 'file_contents,disabled_errors,valid', [ ( \"\"\"\\ [ ] Invalid todo [x]",
"unittest.mock import Mock from unittest.mock import patch import pytest from py.path import local",
"[x] Deff is good \"\"\", \"\"\"\\ - [ ] Should be good -",
"be good - [x] Deff is good \"\"\", ), ], ) def test_check_mode_fix_file(tmpdir,",
"a input path \"\"\" main(['check', 'tests/test_documents']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix':",
"\"\"\" check_mode = CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file('tests/test_documents/contains_errors.md') assert errors['errors'] is",
"math block \"\"\", [MathError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid todo There",
"\"\"\"Test that when a invalid path is given SystemExit is raised\"\"\" with pytest.raises(SystemExit):",
"that fix is called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock): \"\"\"Test that _fix_doc_errors is called",
"@patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def test_check_mode_checks_file_when_given_file( _check_file: Mock, _check_dir: Mock, ): \"\"\"Test that when given",
"( \"\"\"\\ [ ] Invalid todo [x] Ivalid todo \"\"\", TodoError.get_error_name(), True, ),",
"), ( \"\"\" There is $$invalid$$ math in this line There only is",
"todo [x] Ivalid todo \"\"\", \"\"\"\\ - [ ] Invalid todo - [x]",
"except ValueError: pass _fix_doc_errors.assert_called() # Test errors def test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test that when a",
"amount \"\"\" check_mode = CheckMode() # Set the _disabled_errros manually, because it is",
"{ 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [TodoError.get_error_name()], 'simple_errors': False, } expected_options: ModeOptions =",
"good \"\"\", ), ], ) def test_check_mode_fix_file(tmpdir, wrong, good): file = tmpdir.join('test.md') file.write(wrong)",
"of the error # When an error is in _disabled_errors is should not",
"is set in start() # which is not run in this test check_mode._disabled_errors",
"correct filepath \"\"\" check_mode = CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file('tests/test_documents/contains_errors.md') assert",
"they don'y have access so they raise value error # Which can be",
"found # in a document that contains the disabled error @pytest.mark.parametrize( 'file_contents,disabled_errors,valid', [",
"doc_errors = check_mode._check_file(file.strpath) for error in doc_errors['errors']: assert error['error_type'].get_error_name() not in disabled_errors if",
"errors['errors'] is not None assert errors['file_path'] == 'tests/test_documents/contains_errors.md' @pytest.mark.parametrize( 'wrong,good', [ ( \"\"\"\\",
"] Should be good - [x] Deff is good \"\"\", ), ], )",
"and the correct filepath \"\"\" check_mode = CheckMode() check_mode._disabled_errors = [] errors =",
"@patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are passed to check",
"pytest.raises(SystemExit): main(['check', 'no dir']) def test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test that _check_dir raises when the input",
"in _disabled_errors is should not be found # in a document that contains",
"def test_check_mode_checks_dir_when_given_dir(mock: Mock): \"\"\"Test that when given a directory path, _check_dir is called\"\"\"",
"When an error is in _disabled_errors is should not be found # in",
"'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True, 'args': expected_args,",
"len(errors) == 3 def test_check_mode_check_file_returns(): \"\"\"Test that _check_file checks the file and returns",
"[x] Deff is good \"\"\", ), ], ) def test_check_mode_fix_file(tmpdir, wrong, good): file",
"arguments are passed to check mode with a input path and fixing enabled",
"test_check_mode_checks_dir_when_given_dir(mock: Mock): \"\"\"Test that when given a directory path, _check_dir is called\"\"\" main(['check',",
"# Test errors def test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test that when a invalid path is given",
"because it is set in start() # which is not run in this",
"is should not be found # in a document that contains the disabled",
"errors as are present in the folder TODO: Make test independent of test/test_documents",
"@patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start: Mock, ): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', '--disable-math-error', ]) expected_args:",
"pass # _check_file should be called with the filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') # Check dir",
"= file.read() assert c1 == good # Test disabling errors # Test passing",
"CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file('tests/test_documents/contains_errors.md') assert errors['errors'] is not None assert",
"need access to the terminal, # but they don'y have access so they",
"False, 'disabled_errors': [ MathError.get_error_name(), TodoError.get_error_name(), ], 'simple_errors': False, } expected_options: ModeOptions = {",
"the filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') # Check dir should not be called _check_dir.assert_not_called() # Test",
"good # Test disabling errors # Test passing the flag # Passing the",
"$correct$ and one $$wrong$$ math block \"\"\", [MathError.get_error_name()], True, ), ( \"\"\"\\ [",
"\"\"\"Tests that the correct arguments are passed to check mode with only a",
"] Invalid todo There is $$invalid$$ math in this line \"\"\", [MathError.get_error_name(), TodoError.get_error_name()],",
"main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are passed to",
"it is set in start() # which is not run in this test",
"checks the file and returns errors and the correct filepath \"\"\" check_mode =",
"line \"\"\", [TodoError.get_error_name()], # Only disable todo errors False, ), ], ) def",
"( \"\"\"\\ - [ ] Should be good - [x] Deff is good",
"There is $$invalid$$ math in this line There only is correct $math$ in",
"to check mode with only a input path \"\"\" main(['check', 'tests/test_documents']) expected_args: CheckModeArgs",
"def test_check_mode_disbled_errors_are_not_returned( tmpdir: Path, file_contents: str, disabled_errors: List[str], valid: bool, ): file =",
"in doc_errors['errors']: assert error['error_type'].get_error_name() not in disabled_errors if valid: assert len(doc_errors['errors']) == 0",
"def test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test that when a invalid path is given SystemExit is raised\"\"\"",
"not be called _check_dir.assert_not_called() # Test that fix is called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors:",
"test_simple_errors_is_passed_through_correctly(mock: Mock): # Default check -- should be disabled (false) main(('check', 'in_path')) assert",
"errors and the correct filepath \"\"\" check_mode = CheckMode() check_mode._disabled_errors = [] errors",
"\"\"\"Tests that the correct arguments are passed to check mode with a input",
"TodoError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid todo There is $$invalid$$ math",
"MathError from notesystem.modes.check_mode.errors.markdown_errors import TodoError from notesystem.notesystem import main def test_required_arguments(): \"\"\"Does the",
"'in_path')) assert mock.call_args.args[0]['simple_errors'] == False # Enabled check main(('check', 'in_path', '--simple-errors')) assert mock.call_args.args[0]['simple_errors']",
"- [x] Deff is good \"\"\", \"\"\"\\ - [ ] Should be good",
"3 def test_check_mode_check_file_returns(): \"\"\"Test that _check_file checks the file and returns errors and",
"todo There is $$invalid$$ math in this line \"\"\", [TodoError.get_error_name()], # Only disable",
"TodoError from notesystem.notesystem import main def test_required_arguments(): \"\"\"Does the check mode fail without",
"Test errors def test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test that when a invalid path is given SystemExit",
"disabled_errors doc_errors = check_mode._check_file(file.strpath) for error in doc_errors['errors']: assert error['error_type'].get_error_name() not in disabled_errors",
"# _check_file should be called with the filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') # Check dir should",
"is not run in this test check_mode._disabled_errors = [] errors = check_mode._check_dir('tests/test_documents') assert",
"\"\"\"Test that _fix_doc_errors is called when fixing is enabled\"\"\" try: main(['check', 'tests/test_documents/ast_error_test_1.md', '-f'])",
"math in this line \"\"\", [TodoError.get_error_name()], # Only disable todo errors False, ),",
"when fixing is enabled\"\"\" try: main(['check', 'tests/test_documents/ast_error_test_1.md', '-f']) except ValueError: pass _fix_doc_errors.assert_called() #",
"0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def test_simple_errors_is_passed_through_correctly(mock: Mock): # Default check -- should be disabled (false)",
"Check dir should not be called _check_dir.assert_not_called() # Test that fix is called",
"actual disabling of the error # When an error is in _disabled_errors is",
"def test_simple_errors_is_passed_through_correctly(mock: Mock): # Default check -- should be disabled (false) main(('check', 'in_path'))",
"for error in doc_errors['errors']: assert error['error_type'].get_error_name() not in disabled_errors if valid: assert len(doc_errors['errors'])",
"{ 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start) main(['check', 'tests/test_documents',",
"== 3 def test_check_mode_check_file_returns(): \"\"\"Test that _check_file checks the file and returns errors",
"Ivalid todo \"\"\", \"\"\"\\ - [ ] Invalid todo - [x] Ivalid todo",
"that when a invalid path is given SystemExit is raised\"\"\" with pytest.raises(SystemExit): main(['check',",
"Invalid todo [x] Ivalid todo \"\"\", \"\"\"\\ - [ ] Invalid todo -",
"Test passing the flag # Passing the flag should result in the _disabled_errors",
"a filepath only _check_file is called\"\"\" # Some parts need access to the",
"filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') # Check dir should not be called _check_dir.assert_not_called() # Test that",
"raises when the input is not a dir\"\"\" check_mode = CheckMode() # Totally",
"be called _check_dir.assert_not_called() # Test that fix is called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock):",
"the _disabled_errors being set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', ])",
"flag should result in the _disabled_errors being set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock): main([",
"]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [ MathError.get_error_name(), TodoError.get_error_name(),",
"when given a directory path, _check_dir is called\"\"\" main(['check', 'tests/test_documents']) mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file')",
"of test/test_documents file amount \"\"\" check_mode = CheckMode() # Set the _disabled_errros manually,",
"= CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file('tests/test_documents/contains_errors.md') assert errors['errors'] is not None",
"- [x] Ivalid todo \"\"\", ), ( \"\"\"\\ - [ ] Should be",
"is good \"\"\", ), ], ) def test_check_mode_fix_file(tmpdir, wrong, good): file = tmpdir.join('test.md')",
") def test_check_mode_disbled_errors_are_not_returned( tmpdir: Path, file_contents: str, disabled_errors: List[str], valid: bool, ): file",
"'--disable-todo', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [TodoError.get_error_name()], 'simple_errors':",
"ModeOptions = { 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) #",
"import patch import pytest from py.path import local as Path from notesystem.modes.base_mode import",
"dir']) def test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test that _check_dir raises when the input is not a",
"\"\"\", \"\"\"\\ - [ ] Invalid todo - [x] Ivalid todo \"\"\", ),",
"notesystem.modes.check_mode.errors.markdown_errors import MathError from notesystem.modes.check_mode.errors.markdown_errors import TodoError from notesystem.notesystem import main def test_required_arguments():",
"[ ] Should be good - [x] Deff is good \"\"\", \"\"\"\\ -",
"print(mock_check_mode_start) main(['check', 'tests/test_documents', '--fix']) mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def test_check_mode_checks_dir_when_given_dir(mock: Mock): \"\"\"Test that when given",
"ValueError: pass # _check_file should be called with the filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') # Check",
"mode with only a input path \"\"\" main(['check', 'tests/test_documents']) expected_args: CheckModeArgs = {",
"Mock): \"\"\"Tests that the correct arguments are passed to check mode with a",
"as are present in the folder TODO: Make test independent of test/test_documents file",
"ignore } mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start) main(['check', 'tests/test_documents', '--fix']) mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def test_check_mode_checks_dir_when_given_dir(mock: Mock): \"\"\"Test",
"\"\"\"\\ - [ ] Should be good - [x] Deff is good \"\"\",",
"== True @patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error') def test_print_simple_doc_error_is_called(mock: Mock): main(['check', 'tests/test_documents', '--simple-errors']) assert mock.call_count == len(os.listdir('tests/test_documents'))",
"one $$wrong$$ math block \"\"\", [MathError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid",
"{ 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) # Test the",
"Mock, ): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', '--disable-math-error', ]) expected_args: CheckModeArgs = { 'in_path':",
"True, ), ( \"\"\"\\ [ ] Invalid todo There is $$invalid$$ math in",
"= CheckMode() # Totally invalid dir with pytest.raises(NotADirectoryError): check_mode._check_dir('not a dir') # With",
"= [] errors = check_mode._check_dir('tests/test_documents') assert len(errors) == 3 def test_check_mode_check_file_returns(): \"\"\"Test that",
"todo - [x] Ivalid todo \"\"\", ), ( \"\"\"\\ - [ ] Should",
"False # Enabled check main(('check', 'in_path', '--simple-errors')) assert mock.call_args.args[0]['simple_errors'] == True @patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error') def",
"check mode fail without in path\"\"\" with pytest.raises(SystemExit): main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock):",
"'check', 'tests/test_documents/contains_errors.md', '--disable-todo', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors':",
"], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True, 'args': expected_args, #",
"notesystem.notesystem import main def test_required_arguments(): \"\"\"Does the check mode fail without in path\"\"\"",
"'tests/test_documents/contains_errors.md', '--disable-todo', '--disable-math-error', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors':",
"todo errors False, ), ], ) def test_check_mode_disbled_errors_are_not_returned( tmpdir: Path, file_contents: str, disabled_errors:",
"\"\"\", TodoError.get_error_name(), True, ), ( \"\"\" There is $$invalid$$ math in this line",
"todo \"\"\", TodoError.get_error_name(), True, ), ( \"\"\" There is $$invalid$$ math in this",
"'tests/test_documents/ast_error_test_1.md']) except ValueError: pass # _check_file should be called with the filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md')",
"[ ] Invalid todo [x] Ivalid todo \"\"\", TodoError.get_error_name(), True, ), ( \"\"\"",
"'tests/test_documents']) mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def test_check_mode_checks_file_when_given_file( _check_file: Mock, _check_dir: Mock, ): \"\"\"Test that",
"main(('check', 'in_path', '--simple-errors')) assert mock.call_args.args[0]['simple_errors'] == True @patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error') def test_print_simple_doc_error_is_called(mock: Mock): main(['check', 'tests/test_documents',",
"from notesystem.modes.check_mode.errors.markdown_errors import MathError from notesystem.modes.check_mode.errors.markdown_errors import TodoError from notesystem.notesystem import main def",
"CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': False, 'disabled_errors': [], 'simple_errors': False, } expected_options:",
"test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock): \"\"\"Test that _fix_doc_errors is called when fixing is enabled\"\"\" try: main(['check',",
"assert errors['file_path'] == 'tests/test_documents/contains_errors.md' @pytest.mark.parametrize( 'wrong,good', [ ( \"\"\"\\ [ ] Invalid todo",
"CheckMode() # Totally invalid dir with pytest.raises(NotADirectoryError): check_mode._check_dir('not a dir') # With filepath",
"'--disable-math-error', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [ MathError.get_error_name(),",
"access to the terminal, # but they don'y have access so they raise",
"when given a filepath only _check_file is called\"\"\" # Some parts need access",
"os from typing import List from unittest.mock import Mock from unittest.mock import patch",
"the check mode fail without in path\"\"\" with pytest.raises(SystemExit): main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_only_in_path(mock_check_mode_start:",
"that the correct arguments are passed to check mode with a input path",
"ignored try: main(['check', 'tests/test_documents/ast_error_test_1.md']) except ValueError: pass # _check_file should be called with",
"test/test_documents file amount \"\"\" check_mode = CheckMode() # Set the _disabled_errros manually, because",
"} mock_check_mode_start.assert_called_once_with(expected_options) # Test the actual disabling of the error # When an",
"block \"\"\", [MathError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid todo There is",
"'tests/test_documents', '--fix']) mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def test_check_mode_checks_dir_when_given_dir(mock: Mock): \"\"\"Test that when given a directory",
"\"\"\", ), ], ) def test_check_mode_fix_file(tmpdir, wrong, good): file = tmpdir.join('test.md') file.write(wrong) check_mode",
"import CheckModeArgs from notesystem.modes.check_mode.errors.markdown_errors import MathError from notesystem.modes.check_mode.errors.markdown_errors import TodoError from notesystem.notesystem import",
"passed to check mode with only a input path \"\"\" main(['check', 'tests/test_documents']) expected_args:",
"_check_dir: Mock, ): \"\"\"Test that when given a filepath only _check_file is called\"\"\"",
"in start() # which is not run in this test check_mode._disabled_errors = []",
"\"\"\", [MathError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid todo There is $$invalid$$",
"if valid: assert len(doc_errors['errors']) == 0 else: assert len(doc_errors['errors']) > 0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def",
"# Enabled check main(('check', 'in_path', '--simple-errors')) assert mock.call_args.args[0]['simple_errors'] == True @patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error') def test_print_simple_doc_error_is_called(mock:",
"the terminal, # but they don'y have access so they raise value error",
"that the correct arguments are passed to check mode with only a input",
"CheckMode from notesystem.modes.check_mode.check_mode import CheckModeArgs from notesystem.modes.check_mode.errors.markdown_errors import MathError from notesystem.modes.check_mode.errors.markdown_errors import TodoError",
"fixing is enabled\"\"\" try: main(['check', 'tests/test_documents/ast_error_test_1.md', '-f']) except ValueError: pass _fix_doc_errors.assert_called() # Test",
"\"\"\"\\ [ ] Invalid todo There is $$invalid$$ math in this line \"\"\",",
"good): file = tmpdir.join('test.md') file.write(wrong) check_mode = CheckMode() check_mode._disabled_errors = [] errors =",
"when a invalid path is given SystemExit is raised\"\"\" with pytest.raises(SystemExit): main(['check', 'no",
"True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start: Mock,",
"Mock): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix':",
"): file = tmpdir.join('test.md') file.write(file_contents) check_mode = CheckMode() check_mode._disabled_errors = disabled_errors doc_errors =",
"'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [TodoError.get_error_name()], 'simple_errors': False, } expected_options: ModeOptions = { 'visual':",
"much doc errors as are present in the folder TODO: Make test independent",
"ModeOptions = { 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start)",
"= check_mode._check_file('tests/test_documents/contains_errors.md') assert errors['errors'] is not None assert errors['file_path'] == 'tests/test_documents/contains_errors.md' @pytest.mark.parametrize( 'wrong,good',",
"False, 'disabled_errors': [TodoError.get_error_name()], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True, 'args':",
"with the filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') # Check dir should not be called _check_dir.assert_not_called() #",
"Totally invalid dir with pytest.raises(NotADirectoryError): check_mode._check_dir('not a dir') # With filepath with pytest.raises(NotADirectoryError):",
"check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors) c1 = file.read() assert c1 == good # Test disabling errors",
"be called with the filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') # Check dir should not be called",
"{ 'in_path': 'tests/test_documents', 'fix': False, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions =",
"# Totally invalid dir with pytest.raises(NotADirectoryError): check_mode._check_dir('not a dir') # With filepath with",
"this line \"\"\", [TodoError.get_error_name()], # Only disable todo errors False, ), ], )",
"'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start:",
"arguments are passed to check mode with only a input path \"\"\" main(['check',",
"from notesystem.notesystem import main def test_required_arguments(): \"\"\"Does the check mode fail without in",
"'wrong,good', [ ( \"\"\"\\ [ ] Invalid todo [x] Ivalid todo \"\"\", \"\"\"\\",
"disabled error @pytest.mark.parametrize( 'file_contents,disabled_errors,valid', [ ( \"\"\"\\ [ ] Invalid todo [x] Ivalid",
"\"\"\" There is $$invalid$$ math in this line There only is correct $math$",
"== False # Enabled check main(('check', 'in_path', '--simple-errors')) assert mock.call_args.args[0]['simple_errors'] == True @patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error')",
"'tests/test_documents/ast_error_test_1.md', '-f']) except ValueError: pass _fix_doc_errors.assert_called() # Test errors def test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test that",
"test_check_mode_checks_file_when_given_file( _check_file: Mock, _check_dir: Mock, ): \"\"\"Test that when given a filepath only",
"type: ignore } mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments",
"type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) # Test the actual disabling of the error #",
"Ivalid todo \"\"\", TodoError.get_error_name(), True, ), ( \"\"\" There is $$invalid$$ math in",
"There is $$invalid$$ math in this line \"\"\", [TodoError.get_error_name()], # Only disable todo",
"[], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True, 'args': expected_args, #",
"def test_check_mode_fix_file(tmpdir, wrong, good): file = tmpdir.join('test.md') file.write(wrong) check_mode = CheckMode() check_mode._disabled_errors =",
"error # Which can be ignored try: main(['check', 'tests/test_documents/ast_error_test_1.md']) except ValueError: pass #",
"= check_mode._check_file(file.strpath) for error in doc_errors['errors']: assert error['error_type'].get_error_name() not in disabled_errors if valid:",
"in this line \"\"\", [TodoError.get_error_name()], # Only disable todo errors False, ), ],",
"from unittest.mock import Mock from unittest.mock import patch import pytest from py.path import",
"that check_mode dirs returns as much doc errors as are present in the",
"dirs returns as much doc errors as are present in the folder TODO:",
"), ], ) def test_check_mode_disbled_errors_are_not_returned( tmpdir: Path, file_contents: str, disabled_errors: List[str], valid: bool,",
"Some parts need access to the terminal, # but they don'y have access",
"_disabled_errros manually, because it is set in start() # which is not run",
"this test check_mode._disabled_errors = [] errors = check_mode._check_dir('tests/test_documents') assert len(errors) == 3 def",
"filepath with pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def test_check_mode_check_dir_returns(): \"\"\"Test that check_mode dirs returns as much",
"Default check -- should be disabled (false) main(('check', 'in_path')) assert mock.call_args.args[0]['simple_errors'] == False",
"without in path\"\"\" with pytest.raises(SystemExit): main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock): \"\"\"Tests that the",
"ValueError: pass _fix_doc_errors.assert_called() # Test errors def test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test that when a invalid",
"\"\"\" main(['check', 'tests/test_documents', '-f']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': True, 'disabled_errors':",
"pytest from py.path import local as Path from notesystem.modes.base_mode import ModeOptions from notesystem.modes.check_mode.check_mode",
"pytest.raises(SystemExit): main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are passed",
"_disabled_errors being set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', ]) expected_args:",
"check_mode = CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors) c1 = file.read()",
"'tests/test_documents', '-f']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': True, 'disabled_errors': [], 'simple_errors':",
") def test_check_mode_fix_file(tmpdir, wrong, good): file = tmpdir.join('test.md') file.write(wrong) check_mode = CheckMode() check_mode._disabled_errors",
"def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are passed to check mode",
"path \"\"\" main(['check', 'tests/test_documents']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': False, 'disabled_errors':",
"file_contents: str, disabled_errors: List[str], valid: bool, ): file = tmpdir.join('test.md') file.write(file_contents) check_mode =",
"@patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are passed to check",
"not be found # in a document that contains the disabled error @pytest.mark.parametrize(",
"assert mock.call_args.args[0]['simple_errors'] == False # Enabled check main(('check', 'in_path', '--simple-errors')) assert mock.call_args.args[0]['simple_errors'] ==",
"type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start: Mock, ): main([ 'check', 'tests/test_documents/contains_errors.md',",
"} expected_options: ModeOptions = { 'visual': True, 'args': expected_args, # type: ignore }",
"returns errors and the correct filepath \"\"\" check_mode = CheckMode() check_mode._disabled_errors = []",
"should be disabled (false) main(('check', 'in_path')) assert mock.call_args.args[0]['simple_errors'] == False # Enabled check",
"\"\"\"Does the check mode fail without in path\"\"\" with pytest.raises(SystemExit): main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def",
"check_mode._check_dir('not a dir') # With filepath with pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def test_check_mode_check_dir_returns(): \"\"\"Test that",
"== 0 else: assert len(doc_errors['errors']) > 0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def test_simple_errors_is_passed_through_correctly(mock: Mock): # Default",
"expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [ MathError.get_error_name(), TodoError.get_error_name(), ],",
"check_mode._disabled_errors = [] errors = check_mode._check_file('tests/test_documents/contains_errors.md') assert errors['errors'] is not None assert errors['file_path']",
"c1 == good # Test disabling errors # Test passing the flag #",
"given a filepath only _check_file is called\"\"\" # Some parts need access to",
"test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test that when a invalid path is given SystemExit is raised\"\"\" with",
"[] errors = check_mode._check_file('tests/test_documents/contains_errors.md') assert errors['errors'] is not None assert errors['file_path'] == 'tests/test_documents/contains_errors.md'",
"check_mode._check_file('tests/test_documents/contains_errors.md') assert errors['errors'] is not None assert errors['file_path'] == 'tests/test_documents/contains_errors.md' @pytest.mark.parametrize( 'wrong,good', [",
"which is not run in this test check_mode._disabled_errors = [] errors = check_mode._check_dir('tests/test_documents')",
"= { 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def",
"not None assert errors['file_path'] == 'tests/test_documents/contains_errors.md' @pytest.mark.parametrize( 'wrong,good', [ ( \"\"\"\\ [ ]",
"with pytest.raises(NotADirectoryError): check_mode._check_dir('not a dir') # With filepath with pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def test_check_mode_check_dir_returns():",
"] Invalid todo - [x] Ivalid todo \"\"\", ), ( \"\"\"\\ - [",
"# in a document that contains the disabled error @pytest.mark.parametrize( 'file_contents,disabled_errors,valid', [ (",
"that when given a directory path, _check_dir is called\"\"\" main(['check', 'tests/test_documents']) mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir')",
"main(['check', 'tests/test_documents/ast_error_test_1.md']) except ValueError: pass # _check_file should be called with the filepath",
"check_mode._check_file(file.strpath) for error in doc_errors['errors']: assert error['error_type'].get_error_name() not in disabled_errors if valid: assert",
"in disabled_errors if valid: assert len(doc_errors['errors']) == 0 else: assert len(doc_errors['errors']) > 0",
"test_check_mode_check_file_returns(): \"\"\"Test that _check_file checks the file and returns errors and the correct",
"\"\"\" check_mode = CheckMode() # Set the _disabled_errros manually, because it is set",
"Which can be ignored try: main(['check', 'tests/test_documents/ast_error_test_1.md']) except ValueError: pass # _check_file should",
"\"\"\"Test that when given a directory path, _check_dir is called\"\"\" main(['check', 'tests/test_documents']) mock.assert_called_once_with('tests/test_documents')",
"errors False, ), ], ) def test_check_mode_disbled_errors_are_not_returned( tmpdir: Path, file_contents: str, disabled_errors: List[str],",
"\"\"\"Test that when given a filepath only _check_file is called\"\"\" # Some parts",
"check_mode._disabled_errors = [] errors = check_mode._check_dir('tests/test_documents') assert len(errors) == 3 def test_check_mode_check_file_returns(): \"\"\"Test",
"dir\"\"\" check_mode = CheckMode() # Totally invalid dir with pytest.raises(NotADirectoryError): check_mode._check_dir('not a dir')",
"True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start) main(['check', 'tests/test_documents', '--fix']) mock_check_mode_start.assert_called_with(expected_options)",
"'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start: Mock, ):",
"try: main(['check', 'tests/test_documents/ast_error_test_1.md', '-f']) except ValueError: pass _fix_doc_errors.assert_called() # Test errors def test_check_mode_raises_with_non_existing_dir_or_file():",
"import local as Path from notesystem.modes.base_mode import ModeOptions from notesystem.modes.check_mode.check_mode import CheckMode from",
"the actual disabling of the error # When an error is in _disabled_errors",
"main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', '--disable-math-error', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix':",
"], ) def test_check_mode_fix_file(tmpdir, wrong, good): file = tmpdir.join('test.md') file.write(wrong) check_mode = CheckMode()",
"not in disabled_errors if valid: assert len(doc_errors['errors']) == 0 else: assert len(doc_errors['errors']) >",
"test independent of test/test_documents file amount \"\"\" check_mode = CheckMode() # Set the",
"import TodoError from notesystem.notesystem import main def test_required_arguments(): \"\"\"Does the check mode fail",
"'--simple-errors')) assert mock.call_args.args[0]['simple_errors'] == True @patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error') def test_print_simple_doc_error_is_called(mock: Mock): main(['check', 'tests/test_documents', '--simple-errors']) assert",
"test_required_arguments(): \"\"\"Does the check mode fail without in path\"\"\" with pytest.raises(SystemExit): main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start')",
"( \"\"\"\\ [ ] Invalid todo There is $$invalid$$ math in this line",
"is good \"\"\", \"\"\"\\ - [ ] Should be good - [x] Deff",
"tmpdir.join('test.md') file.write(file_contents) check_mode = CheckMode() check_mode._disabled_errors = disabled_errors doc_errors = check_mode._check_file(file.strpath) for error",
"terminal, # but they don'y have access so they raise value error #",
"fix is called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock): \"\"\"Test that _fix_doc_errors is called when",
"as much doc errors as are present in the folder TODO: Make test",
"'--fix']) mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def test_check_mode_checks_dir_when_given_dir(mock: Mock): \"\"\"Test that when given a directory path,",
"\"\"\"\\ - [ ] Invalid todo - [x] Ivalid todo \"\"\", ), (",
"Path from notesystem.modes.base_mode import ModeOptions from notesystem.modes.check_mode.check_mode import CheckMode from notesystem.modes.check_mode.check_mode import CheckModeArgs",
"def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', ]) expected_args: CheckModeArgs = { 'in_path':",
"is $$invalid$$ math in this line There only is correct $math$ in this",
"== good # Test disabling errors # Test passing the flag # Passing",
"called _check_dir.assert_not_called() # Test that fix is called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock): \"\"\"Test",
"\"\"\", [MathError.get_error_name(), TodoError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid todo There is",
"invalid path is given SystemExit is raised\"\"\" with pytest.raises(SystemExit): main(['check', 'no dir']) def",
"= tmpdir.join('test.md') file.write(wrong) check_mode = CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors)",
"expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock): \"\"\"Tests that the",
"'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) # Test the actual",
"main def test_required_arguments(): \"\"\"Does the check mode fail without in path\"\"\" with pytest.raises(SystemExit):",
"notesystem.modes.check_mode.check_mode import CheckModeArgs from notesystem.modes.check_mode.errors.markdown_errors import MathError from notesystem.modes.check_mode.errors.markdown_errors import TodoError from notesystem.notesystem",
"the _disabled_errros manually, because it is set in start() # which is not",
"from notesystem.modes.check_mode.check_mode import CheckModeArgs from notesystem.modes.check_mode.errors.markdown_errors import MathError from notesystem.modes.check_mode.errors.markdown_errors import TodoError from",
"is $$invalid$$ math in this line \"\"\", [MathError.get_error_name(), TodoError.get_error_name()], True, ), ( \"\"\"\\",
"@patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def test_check_mode_checks_file_when_given_file( _check_file: Mock, _check_dir: Mock, ): \"\"\"Test that when given a",
"With filepath with pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def test_check_mode_check_dir_returns(): \"\"\"Test that check_mode dirs returns as",
"with pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def test_check_mode_check_dir_returns(): \"\"\"Test that check_mode dirs returns as much doc",
"present in the folder TODO: Make test independent of test/test_documents file amount \"\"\"",
"is raised\"\"\" with pytest.raises(SystemExit): main(['check', 'no dir']) def test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test that _check_dir raises",
"{ 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [ MathError.get_error_name(), TodoError.get_error_name(), ], 'simple_errors': False, }",
"= CheckMode() # Set the _disabled_errros manually, because it is set in start()",
"mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start: Mock, ): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', '--disable-math-error', ])",
"True, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True, 'args':",
"notesystem.modes.check_mode.check_mode import CheckMode from notesystem.modes.check_mode.check_mode import CheckModeArgs from notesystem.modes.check_mode.errors.markdown_errors import MathError from notesystem.modes.check_mode.errors.markdown_errors",
"- [ ] Should be good - [x] Deff is good \"\"\", ),",
"[TodoError.get_error_name()], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True, 'args': expected_args, #",
"this line \"\"\", [MathError.get_error_name(), TodoError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid todo",
"assert errors['errors'] is not None assert errors['file_path'] == 'tests/test_documents/contains_errors.md' @pytest.mark.parametrize( 'wrong,good', [ (",
"Ivalid todo \"\"\", ), ( \"\"\"\\ - [ ] Should be good -",
"- [x] Deff is good \"\"\", ), ], ) def test_check_mode_fix_file(tmpdir, wrong, good):",
"access so they raise value error # Which can be ignored try: main(['check',",
"is not None assert errors['file_path'] == 'tests/test_documents/contains_errors.md' @pytest.mark.parametrize( 'wrong,good', [ ( \"\"\"\\ [",
"ignore } mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start: Mock, ): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo',",
"disabled (false) main(('check', 'in_path')) assert mock.call_args.args[0]['simple_errors'] == False # Enabled check main(('check', 'in_path',",
"input path and fixing enabled \"\"\" main(['check', 'tests/test_documents', '-f']) expected_args: CheckModeArgs = {",
"[x] Ivalid todo \"\"\", ), ( \"\"\"\\ - [ ] Should be good",
"] Should be good - [x] Deff is good \"\"\", \"\"\"\\ - [",
"Mock): # Default check -- should be disabled (false) main(('check', 'in_path')) assert mock.call_args.args[0]['simple_errors']",
"disabled_errors if valid: assert len(doc_errors['errors']) == 0 else: assert len(doc_errors['errors']) > 0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run')",
"} mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start: Mock, ): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', '--disable-math-error',",
"correct $math$ in this line There is one $correct$ and one $$wrong$$ math",
"but they don'y have access so they raise value error # Which can",
"test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test that _check_dir raises when the input is not a dir\"\"\" check_mode",
"error['error_type'].get_error_name() not in disabled_errors if valid: assert len(doc_errors['errors']) == 0 else: assert len(doc_errors['errors'])",
"correct arguments are passed to check mode with only a input path \"\"\"",
"'check', 'tests/test_documents/contains_errors.md', '--disable-todo', '--disable-math-error', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False,",
"\"\"\"Test that check_mode dirs returns as much doc errors as are present in",
"@patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock): \"\"\"Test that _fix_doc_errors is called when fixing is enabled\"\"\"",
"[] errors = check_mode._check_dir('tests/test_documents') assert len(errors) == 3 def test_check_mode_check_file_returns(): \"\"\"Test that _check_file",
"check_mode = CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file('tests/test_documents/contains_errors.md') assert errors['errors'] is not",
"is $$invalid$$ math in this line \"\"\", [TodoError.get_error_name()], # Only disable todo errors",
"expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) # Test the actual disabling of the",
"'fix': True, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True,",
"\"\"\", ), ( \"\"\"\\ - [ ] Should be good - [x] Deff",
"file and returns errors and the correct filepath \"\"\" check_mode = CheckMode() check_mode._disabled_errors",
"CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors) c1 = file.read() assert c1",
"assert c1 == good # Test disabling errors # Test passing the flag",
"mock.assert_called_once_with('tests/test_documents') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file') def test_check_mode_checks_file_when_given_file( _check_file: Mock, _check_dir: Mock, ): \"\"\"Test that when",
"]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [TodoError.get_error_name()], 'simple_errors': False,",
"error is in _disabled_errors is should not be found # in a document",
"There only is correct $math$ in this line There is one $correct$ and",
"is correct $math$ in this line There is one $correct$ and one $$wrong$$",
"def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock): \"\"\"Test that _fix_doc_errors is called when fixing is enabled\"\"\" try:",
"$$invalid$$ math in this line \"\"\", [TodoError.get_error_name()], # Only disable todo errors False,",
"'tests/test_documents', 'fix': True, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions = { 'visual':",
"file.read() assert c1 == good # Test disabling errors # Test passing the",
"the flag # Passing the flag should result in the _disabled_errors being set",
"TodoError.get_error_name(), True, ), ( \"\"\" There is $$invalid$$ math in this line There",
"assert mock.call_args.args[0]['simple_errors'] == True @patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error') def test_print_simple_doc_error_is_called(mock: Mock): main(['check', 'tests/test_documents', '--simple-errors']) assert mock.call_count",
"_check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') # Check dir should not be called _check_dir.assert_not_called() # Test that fix",
"def test_check_mode_check_dir_returns(): \"\"\"Test that check_mode dirs returns as much doc errors as are",
"import pytest from py.path import local as Path from notesystem.modes.base_mode import ModeOptions from",
"expected_options: ModeOptions = { 'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options)",
"'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start) main(['check', 'tests/test_documents', '--fix'])",
"is one $correct$ and one $$wrong$$ math block \"\"\", [MathError.get_error_name()], True, ), (",
"main(['check', 'tests/test_documents']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': False, 'disabled_errors': [], 'simple_errors':",
"main(['check', 'tests/test_documents/ast_error_test_1.md', '-f']) except ValueError: pass _fix_doc_errors.assert_called() # Test errors def test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test",
"Set the _disabled_errros manually, because it is set in start() # which is",
"[] errors = check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors) c1 = file.read() assert c1 == good #",
"in a document that contains the disabled error @pytest.mark.parametrize( 'file_contents,disabled_errors,valid', [ ( \"\"\"\\",
"a invalid path is given SystemExit is raised\"\"\" with pytest.raises(SystemExit): main(['check', 'no dir'])",
"in path\"\"\" with pytest.raises(SystemExit): main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock): \"\"\"Tests that the correct",
"from unittest.mock import patch import pytest from py.path import local as Path from",
"called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock): \"\"\"Test that _fix_doc_errors is called when fixing is",
"be found # in a document that contains the disabled error @pytest.mark.parametrize( 'file_contents,disabled_errors,valid',",
"): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', '--disable-math-error', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md',",
"CheckMode() check_mode._disabled_errors = disabled_errors doc_errors = check_mode._check_file(file.strpath) for error in doc_errors['errors']: assert error['error_type'].get_error_name()",
"errors # Test passing the flag # Passing the flag should result in",
"manually, because it is set in start() # which is not run in",
"'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) # Test the actual disabling of",
"in this line \"\"\", [MathError.get_error_name(), TodoError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid",
"'tests/test_documents']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': False, 'disabled_errors': [], 'simple_errors': False,",
"disable todo errors False, ), ], ) def test_check_mode_disbled_errors_are_not_returned( tmpdir: Path, file_contents: str,",
"'tests/test_documents/contains_errors.md', '--disable-todo', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [TodoError.get_error_name()],",
"= { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [ MathError.get_error_name(), TodoError.get_error_name(), ], 'simple_errors': False,",
"\"\"\" main(['check', 'tests/test_documents']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': False, 'disabled_errors': [],",
"mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are passed to",
"errors = check_mode._check_file('tests/test_documents/contains_errors.md') assert errors['errors'] is not None assert errors['file_path'] == 'tests/test_documents/contains_errors.md' @pytest.mark.parametrize(",
"[ ( \"\"\"\\ [ ] Invalid todo [x] Ivalid todo \"\"\", TodoError.get_error_name(), True,",
"Enabled check main(('check', 'in_path', '--simple-errors')) assert mock.call_args.args[0]['simple_errors'] == True @patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error') def test_print_simple_doc_error_is_called(mock: Mock):",
"from typing import List from unittest.mock import Mock from unittest.mock import patch import",
"( \"\"\" There is $$invalid$$ math in this line There only is correct",
"'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [ MathError.get_error_name(), TodoError.get_error_name(), ], 'simple_errors': False, } expected_options:",
"# Test the actual disabling of the error # When an error is",
"disabling of the error # When an error is in _disabled_errors is should",
"the correct arguments are passed to check mode with a input path and",
"fail without in path\"\"\" with pytest.raises(SystemExit): main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock): \"\"\"Tests that",
"type: ignore } mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start) main(['check', 'tests/test_documents', '--fix']) mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def test_check_mode_checks_dir_when_given_dir(mock: Mock):",
"CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [ MathError.get_error_name(), TodoError.get_error_name(), ], 'simple_errors':",
"def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are passed to check mode",
"line \"\"\", [MathError.get_error_name(), TodoError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid todo There",
"py.path import local as Path from notesystem.modes.base_mode import ModeOptions from notesystem.modes.check_mode.check_mode import CheckMode",
"good - [x] Deff is good \"\"\", \"\"\"\\ - [ ] Should be",
"Mock): \"\"\"Test that when given a directory path, _check_dir is called\"\"\" main(['check', 'tests/test_documents'])",
"path and fixing enabled \"\"\" main(['check', 'tests/test_documents', '-f']) expected_args: CheckModeArgs = { 'in_path':",
"str, disabled_errors: List[str], valid: bool, ): file = tmpdir.join('test.md') file.write(file_contents) check_mode = CheckMode()",
"True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock): \"\"\"Tests",
"'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start) main(['check', 'tests/test_documents', '--fix']) mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir')",
"check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def test_check_mode_check_dir_returns(): \"\"\"Test that check_mode dirs returns as much doc errors as",
"None assert errors['file_path'] == 'tests/test_documents/contains_errors.md' @pytest.mark.parametrize( 'wrong,good', [ ( \"\"\"\\ [ ] Invalid",
"with pytest.raises(SystemExit): main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are",
"not a dir\"\"\" check_mode = CheckMode() # Totally invalid dir with pytest.raises(NotADirectoryError): check_mode._check_dir('not",
"flag # Passing the flag should result in the _disabled_errors being set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start')",
"'visual': True, 'args': expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock):",
"mode with a input path and fixing enabled \"\"\" main(['check', 'tests/test_documents', '-f']) expected_args:",
"_check_file should be called with the filepath _check_file.assert_called_with('tests/test_documents/ast_error_test_1.md') # Check dir should not",
"check -- should be disabled (false) main(('check', 'in_path')) assert mock.call_args.args[0]['simple_errors'] == False #",
"check_mode._disabled_errors = [] errors = check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors) c1 = file.read() assert c1 ==",
"'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [TodoError.get_error_name()], 'simple_errors': False, } expected_options: ModeOptions = {",
"] Invalid todo There is $$invalid$$ math in this line \"\"\", [TodoError.get_error_name()], #",
"is called when fixing is enabled\"\"\" try: main(['check', 'tests/test_documents/ast_error_test_1.md', '-f']) except ValueError: pass",
"= check_mode._check_file(file.strpath) check_mode._fix_doc_errors(errors) c1 = file.read() assert c1 == good # Test disabling",
"Test the actual disabling of the error # When an error is in",
"List from unittest.mock import Mock from unittest.mock import patch import pytest from py.path",
"CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': True, 'disabled_errors': [], 'simple_errors': False, } expected_options:",
"independent of test/test_documents file amount \"\"\" check_mode = CheckMode() # Set the _disabled_errros",
"MathError.get_error_name(), TodoError.get_error_name(), ], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True, 'args':",
"this line There only is correct $math$ in this line There is one",
"mock.call_args.args[0]['simple_errors'] == True @patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error') def test_print_simple_doc_error_is_called(mock: Mock): main(['check', 'tests/test_documents', '--simple-errors']) assert mock.call_count ==",
"a dir\"\"\" check_mode = CheckMode() # Totally invalid dir with pytest.raises(NotADirectoryError): check_mode._check_dir('not a",
"CheckMode() # Set the _disabled_errros manually, because it is set in start() #",
"def test_check_mode_check_file_returns(): \"\"\"Test that _check_file checks the file and returns errors and the",
"result in the _disabled_errors being set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock): main([ 'check', 'tests/test_documents/contains_errors.md',",
"enabled\"\"\" try: main(['check', 'tests/test_documents/ast_error_test_1.md', '-f']) except ValueError: pass _fix_doc_errors.assert_called() # Test errors def",
"file amount \"\"\" check_mode = CheckMode() # Set the _disabled_errros manually, because it",
"file.write(file_contents) check_mode = CheckMode() check_mode._disabled_errors = disabled_errors doc_errors = check_mode._check_file(file.strpath) for error in",
"# type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_multiple_flags( mock_check_mode_start: Mock, ): main([ 'check',",
"errors = check_mode._check_dir('tests/test_documents') assert len(errors) == 3 def test_check_mode_check_file_returns(): \"\"\"Test that _check_file checks",
"file = tmpdir.join('test.md') file.write(wrong) check_mode = CheckMode() check_mode._disabled_errors = [] errors = check_mode._check_file(file.strpath)",
"mock_check_mode_start: Mock, ): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', '--disable-math-error', ]) expected_args: CheckModeArgs = {",
"filepath only _check_file is called\"\"\" # Some parts need access to the terminal,",
"invalid dir with pytest.raises(NotADirectoryError): check_mode._check_dir('not a dir') # With filepath with pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md')",
"_check_file checks the file and returns errors and the correct filepath \"\"\" check_mode",
"CheckModeArgs from notesystem.modes.check_mode.errors.markdown_errors import MathError from notesystem.modes.check_mode.errors.markdown_errors import TodoError from notesystem.notesystem import main",
"], ) def test_check_mode_disbled_errors_are_not_returned( tmpdir: Path, file_contents: str, disabled_errors: List[str], valid: bool, ):",
"[ ( \"\"\"\\ [ ] Invalid todo [x] Ivalid todo \"\"\", \"\"\"\\ -",
"import Mock from unittest.mock import patch import pytest from py.path import local as",
"are present in the folder TODO: Make test independent of test/test_documents file amount",
"typing import List from unittest.mock import Mock from unittest.mock import patch import pytest",
"the input is not a dir\"\"\" check_mode = CheckMode() # Totally invalid dir",
"\"\"\"Test that _check_dir raises when the input is not a dir\"\"\" check_mode =",
"with a input path and fixing enabled \"\"\" main(['check', 'tests/test_documents', '-f']) expected_args: CheckModeArgs",
"have access so they raise value error # Which can be ignored try:",
"$$wrong$$ math block \"\"\", [MathError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid todo",
"[ ] Invalid todo There is $$invalid$$ math in this line \"\"\", [TodoError.get_error_name()],",
"def test_check_mode_checks_file_when_given_file( _check_file: Mock, _check_dir: Mock, ): \"\"\"Test that when given a filepath",
"errors def test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test that when a invalid path is given SystemExit is",
"is called @patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors') def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock): \"\"\"Test that _fix_doc_errors is called when fixing",
"test check_mode._disabled_errors = [] errors = check_mode._check_dir('tests/test_documents') assert len(errors) == 3 def test_check_mode_check_file_returns():",
"'in_path': 'tests/test_documents', 'fix': True, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions = {",
"-- should be disabled (false) main(('check', 'in_path')) assert mock.call_args.args[0]['simple_errors'] == False # Enabled",
"and one $$wrong$$ math block \"\"\", [MathError.get_error_name()], True, ), ( \"\"\"\\ [ ]",
"Deff is good \"\"\", ), ], ) def test_check_mode_fix_file(tmpdir, wrong, good): file =",
"ignore } mock_check_mode_start.assert_called_once_with(expected_options) # Test the actual disabling of the error # When",
"Invalid todo There is $$invalid$$ math in this line \"\"\", [MathError.get_error_name(), TodoError.get_error_name()], True,",
"False, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True, 'args':",
"Mock, ): \"\"\"Test that when given a filepath only _check_file is called\"\"\" #",
"import main def test_required_arguments(): \"\"\"Does the check mode fail without in path\"\"\" with",
"[MathError.get_error_name(), TodoError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid todo There is $$invalid$$",
"dir') # With filepath with pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def test_check_mode_check_dir_returns(): \"\"\"Test that check_mode dirs",
"'tests/test_documents/contains_errors.md' @pytest.mark.parametrize( 'wrong,good', [ ( \"\"\"\\ [ ] Invalid todo [x] Ivalid todo",
"test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are passed to check mode with",
"given SystemExit is raised\"\"\" with pytest.raises(SystemExit): main(['check', 'no dir']) def test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test that",
"'no dir']) def test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test that _check_dir raises when the input is not",
"and returns errors and the correct filepath \"\"\" check_mode = CheckMode() check_mode._disabled_errors =",
"mock.call_args.args[0]['simple_errors'] == False # Enabled check main(('check', 'in_path', '--simple-errors')) assert mock.call_args.args[0]['simple_errors'] == True",
"Mock): \"\"\"Test that _fix_doc_errors is called when fixing is enabled\"\"\" try: main(['check', 'tests/test_documents/ast_error_test_1.md',",
"== 'tests/test_documents/contains_errors.md' @pytest.mark.parametrize( 'wrong,good', [ ( \"\"\"\\ [ ] Invalid todo [x] Ivalid",
"that _check_file checks the file and returns errors and the correct filepath \"\"\"",
"a dir') # With filepath with pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def test_check_mode_check_dir_returns(): \"\"\"Test that check_mode",
"line There only is correct $math$ in this line There is one $correct$",
"be disabled (false) main(('check', 'in_path')) assert mock.call_args.args[0]['simple_errors'] == False # Enabled check main(('check',",
"wrong, good): file = tmpdir.join('test.md') file.write(wrong) check_mode = CheckMode() check_mode._disabled_errors = [] errors",
"valid: bool, ): file = tmpdir.join('test.md') file.write(file_contents) check_mode = CheckMode() check_mode._disabled_errors = disabled_errors",
"$$invalid$$ math in this line \"\"\", [MathError.get_error_name(), TodoError.get_error_name()], True, ), ( \"\"\"\\ [",
"as Path from notesystem.modes.base_mode import ModeOptions from notesystem.modes.check_mode.check_mode import CheckMode from notesystem.modes.check_mode.check_mode import",
"'in_path': 'tests/test_documents', 'fix': False, 'disabled_errors': [], 'simple_errors': False, } expected_options: ModeOptions = {",
"@patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def test_simple_errors_is_passed_through_correctly(mock: Mock): # Default check -- should be disabled (false) main(('check',",
"check main(('check', 'in_path', '--simple-errors')) assert mock.call_args.args[0]['simple_errors'] == True @patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error') def test_print_simple_doc_error_is_called(mock: Mock): main(['check',",
"doc_errors['errors']: assert error['error_type'].get_error_name() not in disabled_errors if valid: assert len(doc_errors['errors']) == 0 else:",
"_check_dir raises when the input is not a dir\"\"\" check_mode = CheckMode() #",
"pass _fix_doc_errors.assert_called() # Test errors def test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test that when a invalid path",
"test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments are passed to check mode with",
"passing the flag # Passing the flag should result in the _disabled_errors being",
"is called\"\"\" # Some parts need access to the terminal, # but they",
"# type: ignore } mock_check_mode_start.assert_called_once_with(expected_options) # Test the actual disabling of the error",
"fixing enabled \"\"\" main(['check', 'tests/test_documents', '-f']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix':",
"check_mode = CheckMode() # Set the _disabled_errros manually, because it is set in",
"import MathError from notesystem.modes.check_mode.errors.markdown_errors import TodoError from notesystem.notesystem import main def test_required_arguments(): \"\"\"Does",
"pytest.raises(NotADirectoryError): check_mode._check_dir('not a dir') # With filepath with pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def test_check_mode_check_dir_returns(): \"\"\"Test",
"is not a dir\"\"\" check_mode = CheckMode() # Totally invalid dir with pytest.raises(NotADirectoryError):",
"expected_args, # type: ignore } mock_check_mode_start.assert_called_with(expected_options) print(mock_check_mode_start) main(['check', 'tests/test_documents', '--fix']) mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir') def",
"tmpdir: Path, file_contents: str, disabled_errors: List[str], valid: bool, ): file = tmpdir.join('test.md') file.write(file_contents)",
"assert len(doc_errors['errors']) > 0 @patch('notesystem.modes.check_mode.check_mode.CheckMode._run') def test_simple_errors_is_passed_through_correctly(mock: Mock): # Default check -- should",
"from notesystem.modes.check_mode.check_mode import CheckMode from notesystem.modes.check_mode.check_mode import CheckModeArgs from notesystem.modes.check_mode.errors.markdown_errors import MathError from",
"pytest.raises(NotADirectoryError): check_mode._check_dir('tests/test_documents/ast_error_test_1.md') def test_check_mode_check_dir_returns(): \"\"\"Test that check_mode dirs returns as much doc errors",
"import List from unittest.mock import Mock from unittest.mock import patch import pytest from",
"the error # When an error is in _disabled_errors is should not be",
"contains the disabled error @pytest.mark.parametrize( 'file_contents,disabled_errors,valid', [ ( \"\"\"\\ [ ] Invalid todo",
"Mock, _check_dir: Mock, ): \"\"\"Test that when given a filepath only _check_file is",
"set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', ]) expected_args: CheckModeArgs =",
"being set @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', ]) expected_args: CheckModeArgs",
"'--disable-todo', '--disable-math-error', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md', 'fix': False, 'disabled_errors': [",
"with only a input path \"\"\" main(['check', 'tests/test_documents']) expected_args: CheckModeArgs = { 'in_path':",
"# type: ignore } mock_check_mode_start.assert_called_with(expected_options) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock): \"\"\"Tests that the correct",
"enabled \"\"\" main(['check', 'tests/test_documents', '-f']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents', 'fix': True,",
"ModeOptions from notesystem.modes.check_mode.check_mode import CheckMode from notesystem.modes.check_mode.check_mode import CheckModeArgs from notesystem.modes.check_mode.errors.markdown_errors import MathError",
"and fixing enabled \"\"\" main(['check', 'tests/test_documents', '-f']) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents',",
"path\"\"\" with pytest.raises(SystemExit): main(['check']) @patch('notesystem.modes.check_mode.check_mode.CheckMode.start') def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock): \"\"\"Tests that the correct arguments",
"to the terminal, # but they don'y have access so they raise value",
"test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock): main([ 'check', 'tests/test_documents/contains_errors.md', '--disable-todo', ]) expected_args: CheckModeArgs = { 'in_path': 'tests/test_documents/contains_errors.md',",
"_fix_doc_errors.assert_called() # Test errors def test_check_mode_raises_with_non_existing_dir_or_file(): \"\"\"Test that when a invalid path is",
"are passed to check mode with only a input path \"\"\" main(['check', 'tests/test_documents'])",
"error in doc_errors['errors']: assert error['error_type'].get_error_name() not in disabled_errors if valid: assert len(doc_errors['errors']) ==",
"test_check_mode_fix_file(tmpdir, wrong, good): file = tmpdir.join('test.md') file.write(wrong) check_mode = CheckMode() check_mode._disabled_errors = []",
"[MathError.get_error_name()], True, ), ( \"\"\"\\ [ ] Invalid todo There is $$invalid$$ math",
"[ MathError.get_error_name(), TodoError.get_error_name(), ], 'simple_errors': False, } expected_options: ModeOptions = { 'visual': True,",
"), ( \"\"\"\\ [ ] Invalid todo There is $$invalid$$ math in this",
"[TodoError.get_error_name()], # Only disable todo errors False, ), ], ) def test_check_mode_disbled_errors_are_not_returned( tmpdir:",
"Invalid todo [x] Ivalid todo \"\"\", TodoError.get_error_name(), True, ), ( \"\"\" There is",
"def test_check_mode_check_dir_raises_with_file_and_not_existing_dir(): \"\"\"Test that _check_dir raises when the input is not a dir\"\"\""
] |
[
"and expected lists differ only in that their lines are permutations of each",
"List of names of matching reference csv file. The location of the reference",
"check_types=None, check_order=None, condition=None, sortby=None, precision=None): \"\"\" Check that an in-memory Pandas dataframe matches",
"will be considered to be the same if they only differ in substrings",
"from tdda.referencetest.checkpandas import PandasComparison from tdda.referencetest.checkfiles import FilesComparison # DEFAULT_FAIL_DIR is the default",
"take two parameters: - a value (which should evaluate as true for the",
"rstrip=rstrip, ignore_substrings=ilc, ignore_patterns=ip, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def assertFileCorrect(self,",
"that an in-memory string matches the contents from a reference text file. string",
"test-driven data analysis. Source repository: http://github.com/tdda/tdda License: MIT Copyright (c) Stochastic Solutions Limited",
"verbose=self.verbose, tmp_dir=self.tmp_dir) def set_data_location(self, location, kind=None): \"\"\" Declare the filesystem location for reference",
"matches a reference one. actual_path Actual csv file. ref_csv Name of reference csv",
"reference output with the actual output. This, obviously, should be used only after",
"Set the regeneration flag for a particular kind of reference file, globally, for",
"set_data_location() can be used to set the per-kind data locations for an individual",
"precision Number of decimal places to compare float values. loader Function to use",
"this limit, then the two are considered to be identical. This should be",
"tmp_dir=self.tmp_dir) def set_data_location(self, location, kind=None): \"\"\" Declare the filesystem location for reference files",
"**kwargs): \"\"\" Sometimes the framework needs to print messages. By default, it will",
"field names to use. Raises NotImplementedError if Pandas is not available. \"\"\" r",
"basis. rstrip if set to true, both strings are right stripped before the",
"the actual string. ref_csv is the name of the reference csv file. The",
"Copyright (c) Stochastic Solutions Limited 2016 \"\"\" from __future__ import absolute_import from __future__",
"Sets the boolean verbose flag globally, to control reporting of errors while running",
"method for regenerating reference data from in-memory results. \"\"\" with open(reference_path, 'w') as",
"useful to be able to see information from failing tests as they happen,",
"and not os.path.isabs(path): if kind not in self.reference_data_locations: kind = None if kind",
"a per-line basis. ignore_substrings is an optional list of substrings; lines containing any",
"of, and drop-in replacement for unittest.TestCase. It extends that class with all of",
"of field names - a function taking a dataframe as its single parameter,",
"path = os.path.join(self.reference_data_locations[kind], path) else: raise Exception('No reference data location for \"%s\"' %",
"paths for text files. ref_paths is a list of names of the matching",
"string, NaN, and NULL keep_default_na is False Raises NotImplementedError if Pandas is not",
"of the reference file is determined by the configuration via set_data_location(). kind is",
"for kind=None, which *must* be specified. If you haven't even defined the None",
"through python's standard unittest framework, via the referencetestcase module. This provides the ReferenceTestCase",
"loader is used. The check_* comparison flags can be of any of the",
"of the files after preprocessing has taken place, so preprocessed versions of the",
"names of matching reference csv file. The location of the reference files is",
"if set to true, both strings are left stripped before the comparison is",
"the reference file is determined by the configuration via set_data_location(). actual_path Optional parameter,",
"giving path for file where actual dataframe originated, used for error messages. expected_path",
"overrides any global defaults set from calls to the set_default_data_location class-method. If you",
"used for error messages. kind Reference kind, used to locate the reference csv",
"correct, either because the previous output was in fact wrong, or because the",
"functionality is also available through the pytest framework, via the referencepytest module. This",
"k == 'print_fn': cls.print_fn = kwargs[k] elif k == 'tmp_dir': cls.tmp_dir = kwargs[k]",
"identical. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_result(string, expected_path) else: ilc =",
"has been specified. It's useful to be able to see the contents of",
"method for resolving a list of reference data files, all of the same",
"ignore_substrings ip = ignore_patterns mpc = max_permutation_cases r = self.files.check_string_against_file(string, expected_path, actual_path=None, lstrip=lstrip,",
"actual_path, ref_path, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a",
"are run. This should be set using the # set_regeneration() class-method. Can be",
"diff command is suggested for seeing the differences between the actual output and",
"display information while running tests. The function should have the same signature as",
"comparing. condition Filter to be applied to datasets before comparing. It can be",
"This should be used for unstructured data such as logfiles, etc. For csv",
"\"\"\" # Verbose flag verbose = True # Temporary directory tmp_dir = DEFAULT_FAIL_DIR",
"happen, rather than waiting for the full report at the end. Verbose is",
"self.files = FilesComparison(print_fn=self.print_fn, verbose=self.verbose, tmp_dir=self.tmp_dir) def set_data_location(self, location, kind=None): \"\"\" Declare the filesystem",
"csv file to obtain a pandas dataframe. If None, then a default csv",
"any global defaults set from calls to the set_default_data_location class-method. If you haven't",
"and self.regenerate[kind] def write_reference_file(self, actual_path, reference_path): \"\"\" Internal method for regenerating reference data.",
"r self.check_failures(failures, msgs) def assertStringCorrect(self, string, ref_csv, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None,",
"between a string and a file fails, the actual string is written to",
"the boolean verbose flag globally, to control reporting of errors while running tests.",
"optional list of regular expressions; lines will be considered to be the same",
"to see the contents of the files after preprocessing has taken place, so",
"of the reference file is determined by the configuration via set_data_location(). kind Reference",
"reference files is determined by the configuration via set_data_location(). kind Reference kind, used",
"regenerate reference data of that kind, rather than comparing. All of the regenerate",
"you make calls to assertFileCorrect() (etc) using relative pathnames for the reference data",
"rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a file matches the contents",
"default_print_fn(*args, **kwargs): \"\"\" Sometimes the framework needs to print messages. By default, it",
"self.check_failures(failures, msgs) def assertFileCorrect(self, actual_path, ref_path, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0):",
"names of the matching reference files. The location of the reference files is",
"of names of matching reference csv file. The location of the reference files",
"ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a collection of files matche the contents",
"how a test failed, if the value does not evaluate as true). \"\"\"",
"to __init__. \"\"\" print(*args, **kwargs) outfile = kwargs.get('file', sys.stdout) outfile.flush() # Default print",
"kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: ref_df = self.pandas.load_csv(expected_path, loader=csv_read_fn) self.assertDatasetsEqual(df, ref_df, actual_path=actual_path,",
"in these locations though its __init__ method when constructing an instance of ReferenceTestCase",
"reference files of a particular kind. Typically you would subclass ReferenceTestCase and pass",
"initialized by set_default_data_location(). default_data_locations = {} @classmethod def set_defaults(cls, **kwargs): \"\"\" Set default",
"limit, then the two are considered to be identical. This should be used",
"the whole line. preprocess is an optional function that takes a list of",
"of the methods from ReferenceTest. The functionality is also available through the pytest",
"set_default_data_location(). default_data_locations = {} @classmethod def set_defaults(cls, **kwargs): \"\"\" Set default parameters, at",
"things like version numbers and timestamps that vary in the output from run",
"the reference csv file. The location of the reference file is determined by",
"actual_path Optional parameter, giving path for file where actual dataframe originated, used for",
"useful for filtering out things like version numbers and timestamps that vary in",
"The functionality provided by this class is available through python's standard unittest framework,",
"(to apply that kind of comparison to all fields) - False (to skip",
"the same parameters as the standard pandas pd.read_csv() function. check_data Option to specify",
"the previous output was in fact wrong, or because the intended behaviour has",
"self.check_failures(failures, msgs) def assertCSVFilesCorrect(self, actual_paths, ref_csvs, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None,",
"list of paths for text files. ref_paths is a list of names of",
"use assertCSVFileCorrect instead. \"\"\" expected_paths = self.resolve_reference_paths(ref_paths, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else:",
"r = self.pandas.check_csv_file(actual_path, expected_path, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r",
"r = self.files.check_string_against_file(string, expected_path, actual_path=None, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ilc, ignore_patterns=ip, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs)",
"the reference files is determined by the configuration via set_data_location(). kind is the",
"as its single parameter, and returning a list of field names to use.",
"the environment variable TDDA_FAIL_DIR is used, or, if that is not defined, it",
"self.reference_data_locations: path = os.path.join(self.reference_data_locations[kind], path) else: raise Exception('No reference data location for \"%s\"'",
"the strings/files that contain particular patterns or regular expressions. This is typically useful",
"reference file is determined by the configuration via set_data_location(). actual_path Optional parameter, giving",
"# Verbose flag verbose = True # Temporary directory tmp_dir = DEFAULT_FAIL_DIR #",
"both strings are left stripped before the comparison is carried out. Note: the",
"lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that an in-memory string matches",
"kwargs[k] elif k == 'tmp_dir': cls.tmp_dir = kwargs[k] else: raise Exception('set_defaults: Unrecogized option",
"to specify the directory where temporary files are written. Temporary files are created",
"wrapper around pandas pd.read_csv(), with default options as follows: index_col is None infer_datetime_format",
"(failures, msgs) = r self.check_failures(failures, msgs) def resolve_reference_path(self, path, kind=None): \"\"\" Internal method",
"tests are run. This should be set using the # set_regeneration() class-method. Can",
"\"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: ref_df = self.pandas.load_csv(expected_path,",
"reference data files, then it can't check correctness, so it will raise an",
"as logfiles, etc. For csv files, use assertCSVFileCorrect instead. \"\"\" expected_paths = self.resolve_reference_paths(ref_paths,",
"in paths] def should_regenerate(self, kind): \"\"\" Internal method to determine if a particular",
"r self.check_failures(failures, msgs) def assertDatasetCorrect(self, df, ref_csv, actual_path=None, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None,",
"__future__ import absolute_import from __future__ import print_function from __future__ import division from __future__",
"preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def assertFileCorrect(self, actual_path, ref_path, kind=None,",
"else: r = self.pandas.check_csv_file(actual_path, expected_path, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) =",
"print_fn parameter to __init__. \"\"\" print(*args, **kwargs) outfile = kwargs.get('file', sys.stdout) outfile.flush() #",
"reference files. The location of the reference files is determined by the configuration",
"ref_csv, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that an in-memory",
"def check_failures(self, failures, msgs): \"\"\" Internal method for check for failures and reporting",
"both the actual and expected. max_permutation_cases is an optional number specifying the maximum",
"# Temporary directory tmp_dir = DEFAULT_FAIL_DIR # Dictionary describing which kinds of reference",
"even defined the None default, and you make calls to assertFileCorrect() (etc) using",
"regenerate flags are set to False by default. \"\"\" cls.regenerate[kind] = regenerate @classmethod",
"only in that their lines are permutations of each other, and the number",
"globally, to specify the directory where temporary files are written. Temporary files are",
"the framework will regenerate reference data of that kind, rather than comparing. All",
"if kind not in self.reference_data_locations: kind = None if kind in self.reference_data_locations: path",
"ref_csv, actual_path=None, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check",
"logfiles, etc. For csv files, use assertCSVFileCorrect instead. \"\"\" expected_paths = self.resolve_reference_paths(ref_paths, kind=kind)",
"when constructing an instance of ReferenceTestCase as a superclass. If calls to assertFileCorrect()",
"of the ReferenceTest class subsequently created. The instance method set_data_location() can be used",
"of the following: - None (to apply that kind of comparison to all",
"print(*args, **kwargs) outfile = kwargs.get('file', sys.stdout) outfile.flush() # Default print function print_fn =",
"for check for failures and reporting them. \"\"\" self.assert_fn(failures == 0, '\\n'.join(msgs)) @staticmethod",
"r self.check_failures(failures, msgs) def assertFilesCorrect(self, actual_paths, ref_paths, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None,",
"dataframe. ref_df Expected dataframe. actual_path Optional parameter, giving path for file where actual",
"= max_permutation_cases r = self.files.check_string_against_file(string, expected_path, actual_path=None, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ilc, ignore_patterns=ip, preprocess=preprocess, max_permutation_cases=mpc)",
"been specified using a relative path. \"\"\" if self.reference_data_locations and not os.path.isabs(path): if",
"on a per-line basis. rstrip if set to true, both strings are right",
"made available as top-level functions, # to work will with frameworks like pytest.",
"output with the actual output. This, obviously, should be used only after careful",
"locations for an individual instance of the class. If calls to assertFileCorrect() (etc)",
"verbose flag globally, to control reporting of errors while running tests. Reference tests",
"# -*- coding: utf-8 -*- \"\"\" referencetest.py: refererence testing for test-driven data analysis.",
"defaults set from calls to the set_default_data_location class-method. If you haven't even defined",
"kind is the reference kind, used to locate the reference files. lstrip if",
"expected_paths, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs)",
"a list of paths for text files. ref_paths is a list of names",
"csv file. df Actual dataframe. ref_csv Name of reference csv file. The location",
"True quotechar is \"\" quoting is csv.QUOTE_MINIMAL escapechar is \\ na_values are the",
"# Magic so that an instance of this class can masquerade as a",
"globally, and will affect all instances of the ReferenceTest class subsequently created. The",
"will raise an exception. \"\"\" self.default_data_locations[kind] = location def __init__(self, assert_fn): \"\"\" Initializer",
"in place. This can be overridden using the set_defaults() class method. DEFAULT_FAIL_DIR =",
"check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) def assertCSVFileCorrect(self, actual_path, ref_csv, kind='csv', csv_read_fn=None, check_data=None,",
"resolve_reference_path(self, path, kind=None): \"\"\" Internal method for deciding where a reference data file",
"Unrecogized option %s' % k) @classmethod def set_regeneration(cls, kind=None, regenerate=True): \"\"\" Set the",
"dataset to use to check that there are no unexpected extra columns. sortby",
"an instance of ReferenceTestCase as a superclass. If calls to assertFileCorrect() (etc) are",
"tend to take longer to run than traditional unit tests, so it is",
"**kwargs): \"\"\" Check that a csv file matches a reference one. actual_path Actual",
"options as follows: index_col is None infer_datetime_format is True quotechar is \"\" quoting",
"failures and reporting them. \"\"\" self.assert_fn(failures == 0, '\\n'.join(msgs)) @staticmethod def default_print_fn(*args, **kwargs):",
"their pathnames are included in the failure messages. If not explicitly set by",
"if self.should_regenerate(kind): self.write_reference_result(string, expected_path) else: ilc = ignore_substrings ip = ignore_patterns mpc =",
"a list of names of the matching reference files. The location of the",
"pandas dataframe. If None, then a default csv loader is used, which takes",
"not specified, a default print function is used which writes unbuffered to sys.stdout.",
"precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertCSVFilesCorrect(self, actual_paths, ref_csvs, kind='csv', csv_read_fn=None,",
"max_permutation_cases=0): \"\"\" Check that a collection of files matche the contents from matching",
"expected lists differ only in that their lines are permutations of each other,",
"NotImplementedError if Pandas is not available. \"\"\" r = self.pandas.check_dataframe(df, ref_df, actual_path=actual_path, expected_path=expected_path,",
"ignore_patterns is an optional list of regular expressions; lines will be considered to",
"condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertCSVFilesCorrect(self, actual_paths, ref_csvs,",
"limit, then the two are considered to be identical. \"\"\" expected_path = self.resolve_reference_path(ref_csv,",
"be identical. This should be used for unstructured data such as logfiles, etc.",
"error messages. check_data Option to specify fields to compare values. check_types Option to",
"/tmp, c:\\temp or whatever tempfile.gettempdir() returns, as appropriate. \"\"\" for k in kwargs:",
"Verbose flag verbose = True # Temporary directory tmp_dir = DEFAULT_FAIL_DIR # Dictionary",
"module. This provides the ReferenceTestCase class, which is a subclass of, and drop-in",
"a function taking a dataframe as its single parameter, and returning a list",
"import tempfile from tdda.referencetest.checkpandas import PandasComparison from tdda.referencetest.checkfiles import FilesComparison # DEFAULT_FAIL_DIR is",
"reference one. actual_paths List of Actual csv files. ref_csvs List of names of",
"that match one of these regular expressions. The expressions must not contain parenthesised",
"functionality provided by this class is available through python's standard unittest framework, via",
"the value does not evaluate as true). \"\"\" self.assert_fn = assert_fn self.reference_data_locations =",
"of a particular kind. This sets the location globally, and will affect all",
"for resolving a list of reference data files, all of the same kind.",
"this directory, and their pathnames are included in the failure messages. If not",
"None default, and you make calls to assertFileCorrect() (etc) using relative pathnames for",
"All of the regenerate flags are set to False by default. \"\"\" cls.regenerate[kind]",
"Optional parameter, giving path for file where expected dataframe originated, used for error",
"the default filesystem location for reference files of a particular kind. This sets",
"kind=None, which *must* be specified. If you haven't even defined the None default,",
"running tests. Reference tests tend to take longer to run than traditional unit",
"for reference data, for # each kind. Can be initialized by set_default_data_location(). default_data_locations",
"Pandas is not available. \"\"\" r = self.pandas.check_dataframe(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types,",
"\"\"\" Check that a csv file matches a reference one. actual_paths List of",
"with open(reference_path, 'w') as fout: fout.write(result) if self.verbose and self.print_fn: self.print_fn('Written %s' %",
"these regular expressions. The expressions must not contain parenthesised groups, and should only",
"Sets the tmp_dir property globally, to specify the directory where temporary files are",
"== 0, '\\n'.join(msgs)) @staticmethod def default_print_fn(*args, **kwargs): \"\"\" Sometimes the framework needs to",
"ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def assertFilesCorrect(self, actual_paths,",
"if the value does not evaluate as true). \"\"\" self.assert_fn = assert_fn self.reference_data_locations",
"data for a list of files. \"\"\" for (actual_path, expected_path) in zip(actual_paths, reference_paths):",
"msgs) def assertFilesCorrect(self, actual_paths, ref_paths, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\"",
"originated, used for error messages. kind Reference kind, used to locate the reference",
"matche the contents from matching collection of reference text files. actual_paths is a",
"= location def assertDatasetsEqual(self, df, ref_df, actual_path=None, expected_path=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None,",
"class subsequently created. The instance method set_data_location() can be used to set the",
"if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: mpc = max_permutation_cases r = self.files.check_files(actual_paths, expected_paths, lstrip=lstrip,",
"parameters can be set: verbose Sets the boolean verbose flag globally, to control",
"from failing tests as they happen, rather than waiting for the full report",
"matches the contents from a reference text file. string is the actual string.",
"The instance method set_data_location() can be used to set the per-kind data locations",
"is used. The check_* comparison flags can be of any of the following:",
"Initializer for a ReferenceTest instance. assert_fn Function to be used to make assertions",
"print function is used which writes unbuffered to sys.stdout. tmp_dir Sets the tmp_dir",
"use. The default csv loader function is a wrapper around pandas pd.read_csv(), with",
"kind Reference kind, used to locate the reference csv files. csv_read_fn A function",
"function has been specified. It's useful to be able to see the contents",
"Declare the default filesystem location for reference files of a particular kind. This",
"dataset matches a reference one from a saved reference csv file. df Actual",
"compare float values. **kwargs Any additional named parameters are passed straight through to",
"reference csv files. csv_read_fn A function to use to read a csv file",
"csv files. ref_csvs List of names of matching reference csv file. The location",
"dataframe. If None, then a default csv loader is used. The check_* comparison",
"- There is support for ignoring lines within the strings/files that contain particular",
"to be applied to datasets before comparing. It can be None, or can",
"should_regenerate(self, kind): \"\"\" Internal method to determine if a particular kind of file",
"\"\"\" Check that a file matches the contents from a reference text file.",
"giving path for file where expected dataframe originated, used for error messages. check_data",
"default csv loader is used. The check_* comparison flags can be of any",
"not in self.reference_data_locations: kind = None if kind in self.reference_data_locations: path = os.path.join(self.reference_data_locations[kind],",
"refererence testing for test-driven data analysis. Source repository: http://github.com/tdda/tdda License: MIT Copyright (c)",
"for comparing results against saved \"known to be correct\" reference results. This is",
"left stripped before the comparison is carried out. Note: the stripping on a",
"all of the methods from ReferenceTest, as functions that can be called directly",
"% reference_path) def check_failures(self, failures, msgs): \"\"\" Internal method for check for failures",
"as appropriate. \"\"\" for k in kwargs: if k == 'verbose': cls.verbose =",
"but you can override it by passing in a print_fn parameter to __init__.",
"import PandasComparison from tdda.referencetest.checkfiles import FilesComparison # DEFAULT_FAIL_DIR is the default location for",
"for error messages. kind Reference kind, used to locate the reference csv file.",
"reference csv file. The location of the reference files is determined by the",
"place, so preprocessed versions of the files are written to this directory, and",
"reference kind, used to locate the reference csv file. lstrip if set to",
"be identical. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_result(string, expected_path) else: ilc",
"in-memory results. \"\"\" with open(reference_path, 'w') as fout: fout.write(result) if self.verbose and self.print_fn:",
"files, then it can't check correctness, so it will raise an exception. \"\"\"",
"true, both strings are right stripped before the comparison is carried out. Note:",
"in zip(actual_paths, reference_paths): self.write_reference_file(actual_path, reference_path) def write_reference_result(self, result, reference_path): \"\"\" Internal method for",
"Exception('set_defaults: Unrecogized option %s' % k) @classmethod def set_regeneration(cls, kind=None, regenerate=True): \"\"\" Set",
"is the reference kind, used to locate the reference file. lstrip if set",
"be used to make assertions for unit-tests. It should take two parameters: -",
"a list of reference data files, all of the same kind. \"\"\" return",
"in the actual dataset to use to check that there are no unexpected",
"names - a function taking a dataframe as its single parameter, and returning",
"written to a file and a diff command is suggested for seeing the",
"determined by the configuration via set_data_location(). actual_path Optional parameter, giving path for file",
"# DEFAULT_FAIL_DIR is the default location for writing failing output # if assertStringCorrect",
"the failure messages. If not explicitly set by set_defaults(), the environment variable TDDA_FAIL_DIR",
"path, kind=None): \"\"\" Internal method for deciding where a reference data file should",
"used. This is the location declared for kind=None, which *must* be specified. If",
"should be compared). precision Number of decimal places to compare float values. loader",
"list of files. \"\"\" for (actual_path, expected_path) in zip(actual_paths, reference_paths): self.write_reference_file(actual_path, reference_path) def",
"sets the location globally, and will affect all instances of the ReferenceTest class",
"at the class level. These defaults will apply to all instances of ReferenceTest",
"used to make assertions for unit-tests. It should take two parameters: - a",
"reference_path): \"\"\" Internal method for regenerating reference data from in-memory results. \"\"\" with",
"you haven't even defined the None default, and you make calls to assertFileCorrect()",
"written to this directory, and their pathnames are included in the failure messages.",
"the location declared for kind=None, which *must* be specified. If you haven't even",
"If not explicitly set by set_defaults(), the environment variable TDDA_FAIL_DIR is used, or,",
"import unicode_literals import os import sys import tempfile from tdda.referencetest.checkpandas import PandasComparison from",
"the same signature as python's __future__ print function. If not specified, a default",
"string matches the contents from a reference text file. string is the actual",
"not in self.regenerate: kind = None return kind in self.regenerate and self.regenerate[kind] def",
"through to the csv_read_fn function. The check_* comparison flags can be of any",
"to determine if a particular kind of file should be regenerated. \"\"\" if",
"that a collection of files matche the contents from matching collection of reference",
"expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_result(string, expected_path) else: ilc = ignore_substrings ip",
"data such as logfiles, etc. For csv files, use assertCSVFileCorrect instead. \"\"\" expected_path",
"a function that takes a dataframe as its single parameter and returns a",
"class-method. Can be initialized via the -w option. regenerate = {} # Dictionary",
"regenerate @classmethod def set_default_data_location(self, location, kind=None): \"\"\" Declare the default filesystem location for",
"a dataframe as its single parameter, and returning a list of field names",
"is support for ignoring lines within the strings/files that contain particular patterns or",
"precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertStringCorrect(self, string, ref_csv, kind=None, lstrip=False,",
"named parameters are passed straight through to the csv_read_fn function. The check_* comparison",
"in self.reference_data_locations: path = os.path.join(self.reference_data_locations[kind], path) else: raise Exception('No reference data location for",
"NaN, and NULL keep_default_na is False Raises NotImplementedError if Pandas is not available.",
"Raises NotImplementedError if Pandas is not available. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if",
"condition=condition, sortby=sortby, precision=precision) def assertCSVFileCorrect(self, actual_path, ref_csv, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None,",
"df Actual dataframe. ref_df Expected dataframe. actual_path Optional parameter, giving path for file",
"than waiting for the full report at the end. Verbose is set to",
"reference file, globally, for all instances of the ReferenceTest class. If the regenerate",
"of its methods can be made available as top-level functions, # to work",
"etc. For csv files, use assertCSVFileCorrect instead. \"\"\" expected_paths = self.resolve_reference_paths(ref_paths, kind=kind) if",
"df, ref_df, actual_path=None, expected_path=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None): \"\"\" Check that",
"condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that an in-memory Pandas dataset matches a",
"failed, if the value does not evaluate as true). \"\"\" self.assert_fn = assert_fn",
"instead. \"\"\" expected_paths = self.resolve_reference_paths(ref_paths, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: mpc =",
"from in-memory results. \"\"\" with open(reference_path, 'w') as fout: fout.write(result) if self.verbose and",
"will be applied to both the actual and expected. max_permutation_cases is an optional",
"to assertFileCorrect() (etc) are made for kinds of reference data that hasn't had",
"used. The check_* comparison flags can be of any of the following: -",
"function, but you can override it by passing in a print_fn parameter to",
"@classmethod def set_regeneration(cls, kind=None, regenerate=True): \"\"\" Set the regeneration flag for a particular",
"and preprocesses it in some way; this function will be applied to both",
"na_values are the empty string, NaN, and NULL keep_default_na is False Raises NotImplementedError",
"to sort by before comparing. condition Filter to be applied to datasets before",
"kind Reference kind, used to locate the reference csv file. csv_read_fn A function",
"for error messages. expected_path Optional parameter, giving path for file where expected dataframe",
"data such as logfiles, etc. For csv files, use assertCSVFileCorrect instead. \"\"\" expected_paths",
"to print messages. By default, it will use this print function, but you",
"ignored in the comparison. ignore_patterns is an optional list of regular expressions; lines",
"ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r",
"List of Actual csv files. ref_csvs List of names of matching reference csv",
"the methods from ReferenceTest, as functions that can be called directly as part",
"strings are right stripped before the comparison is carried out. Note: the stripping",
"of strings and preprocesses it in some way; this function will be applied",
"print messages. By default, it will use this print function, but you can",
"referencepytest module. This module provides all of the methods from ReferenceTest, as functions",
"obtain a pandas dataframe. If None, then a default csv loader is used.",
"def write_reference_files(self, actual_paths, reference_paths): \"\"\" Internal method for regenerating reference data for a",
"field order. check_extra_cols Option to specify fields in the actual dataset to use",
"been specified. It's useful to be able to see the contents of the",
"then the default location is used. This is the location declared for kind=None,",
"referencetest.py: refererence testing for test-driven data analysis. Source repository: http://github.com/tdda/tdda License: MIT Copyright",
"files. lstrip if set to true, both strings are left stripped before the",
"flags can be of any of the following: - None (to apply that",
"\"\"\" Set the regeneration flag for a particular kind of reference file, globally,",
"Check that an in-memory string matches the contents from a reference text file.",
"ReferenceTest class. If the regenerate flag is set to True, then the framework",
"compared). precision Number of decimal places to compare float values. **kwargs Any additional",
"messages. expected_path Optional parameter, giving path for file where expected dataframe originated, used",
"be used to set the per-kind data locations for an individual instance of",
"of that kind, rather than comparing. All of the regenerate flags are set",
"out. Note: the stripping on a per-line basis. ignore_substrings is an optional list",
"'preprocessing' # in place. This can be overridden using the set_defaults() class method.",
"= r self.check_failures(failures, msgs) def resolve_reference_path(self, path, kind=None): \"\"\" Internal method for deciding",
"locate the reference csv files. csv_read_fn A function to use to read a",
"elif k == 'print_fn': cls.print_fn = kwargs[k] elif k == 'tmp_dir': cls.tmp_dir =",
"kind=None, regenerate=True): \"\"\" Set the regeneration flag for a particular kind of reference",
"compared). precision Number of decimal places to compare float values. The check_* comparison",
"which rows should be compared). precision Number of decimal places to compare float",
"check that there are no unexpected extra columns. sortby Option to specify fields",
"name of the reference file. The location of the reference file is determined",
"**kwargs) outfile = kwargs.get('file', sys.stdout) outfile.flush() # Default print function print_fn = default_print_fn",
"differ only in that their lines are permutations of each other, and the",
"check correctness, so it will raise an exception. \"\"\" self.reference_data_locations[kind] = location def",
"Check that a csv file matches a reference one. actual_path Actual csv file.",
"division from __future__ import unicode_literals import os import sys import tempfile from tdda.referencetest.checkpandas",
"kind=None): \"\"\" Declare the filesystem location for reference files of a particular kind.",
"def write_reference_result(self, result, reference_path): \"\"\" Internal method for regenerating reference data from in-memory",
"specified. If you haven't even defined the None default, and you make calls",
"of the matching reference files. The location of the reference files is determined",
"the referencepytest module. This module provides all of the methods from ReferenceTest, as",
"for unstructured data such as logfiles, etc. For csv files, use assertCSVFileCorrect instead.",
"the location declared for kind=None, which *must* be specified. This method overrides any",
"are created whenever a text file check fails and a 'preprocess' function has",
"is \\ na_values are the empty string, NaN, and NULL keep_default_na is False",
"The location of the reference files is determined by the configuration via set_data_location().",
"== 'tmp_dir': cls.tmp_dir = kwargs[k] else: raise Exception('set_defaults: Unrecogized option %s' % k)",
"kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a collection of",
"kind, used to locate the reference files. lstrip if set to true, both",
"as they happen, rather than waiting for the full report at the end.",
"be specified. If you haven't even defined the None default, and you make",
"filesystem location for reference files of a particular kind. Typically you would subclass",
"for deciding where a reference data file should be looked for, if it",
"tests. The function should have the same signature as python's __future__ print function.",
"expected_paths, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def",
"sys.stdout. tmp_dir Sets the tmp_dir property globally, to specify the directory where temporary",
"mpc = max_permutation_cases r = self.files.check_file(actual_path, expected_path, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc)",
"method when constructing an instance of ReferenceTestCase as a superclass. If calls to",
"if that is not defined, it defaults to /tmp, c:\\temp or whatever tempfile.gettempdir()",
"None if kind in self.reference_data_locations: path = os.path.join(self.reference_data_locations[kind], path) else: raise Exception('No reference",
"= self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_result(string, expected_path) else: ilc = ignore_substrings ip =",
"to all instances of ReferenceTest subsequently created. The following parameters can be set:",
"for kinds of reference data that hasn't had its location defined explicitly, then",
"kind) return path def resolve_reference_paths(self, paths, kind=None): \"\"\" Internal method for resolving a",
"as output. The main features are: - If the comparison between a string",
"file is determined by the configuration via set_data_location(). actual_path Optional parameter, giving path",
"stripped before the comparison is carried out. Note: the stripping on a per-line",
"of the reference csv file. The location of the reference file is determined",
"def write_reference_file(self, actual_path, reference_path): \"\"\" Internal method for regenerating reference data. \"\"\" with",
"actual dataframe originated, used for error messages. expected_path Optional parameter, giving path for",
"file is determined by the configuration via set_data_location(). kind is the reference kind,",
"such permutations does not exceed this limit, then the two are considered to",
"% k) @classmethod def set_regeneration(cls, kind=None, regenerate=True): \"\"\" Set the regeneration flag for",
"self.resolve_reference_paths(ref_csvs, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: r = self.pandas.check_csv_files(actual_paths, expected_paths, check_types=check_types, check_order=check_order,",
"able to see information from failing tests as they happen, rather than waiting",
"is also available through the pytest framework, via the referencepytest module. This module",
"methods from ReferenceTest, as functions that can be called directly as part of",
"is the location declared for kind=None, which *must* be specified. If you haven't",
"kind of comparison completely) - a list of field names - a function",
"evaluate as true). \"\"\" self.assert_fn = assert_fn self.reference_data_locations = dict(self.default_data_locations) self.pandas = PandasComparison(print_fn=self.print_fn,",
"rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that an in-memory string matches the",
"files. actual_paths is a list of paths for text files. ref_paths is a",
"comparison between a string and a file fails, the actual string is written",
"set to False by default. \"\"\" cls.regenerate[kind] = regenerate @classmethod def set_default_data_location(self, location,",
"class. If calls to assertFileCorrect() (etc) are made for kinds of reference data",
"msgs) def assertFileCorrect(self, actual_path, ref_path, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\"",
"compare float values. The check_* comparison flags can be of any of the",
"actual_paths, reference_paths): \"\"\" Internal method for regenerating reference data for a list of",
"python's standard unittest framework, via the referencetestcase module. This provides the ReferenceTestCase class,",
"for all instances of the ReferenceTest class. If the regenerate flag is set",
"the actual and expected. max_permutation_cases is an optional number specifying the maximum number",
"explicitly, then the default location is used. This is the location declared for",
"to /tmp, c:\\temp or whatever tempfile.gettempdir() returns, as appropriate. \"\"\" for k in",
"result, reference_path): \"\"\" Internal method for regenerating reference data from in-memory results. \"\"\"",
"that kind of comparison completely) - a list of field names - a",
"check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that an in-memory Pandas dataset matches",
"ignore_patterns=ip, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def assertFileCorrect(self, actual_path, ref_path,",
"location for \"%s\"' % kind) return path def resolve_reference_paths(self, paths, kind=None): \"\"\" Internal",
"write_reference_files(self, actual_paths, reference_paths): \"\"\" Internal method for regenerating reference data for a list",
"unittest.TestCase. It extends that class with all of the methods from ReferenceTest. The",
"flag for a particular kind of reference file, globally, for all instances of",
"useful to be able to see the contents of the files after preprocessing",
"set to True by default. print_fn Sets the print function globally, to specify",
"check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertStringCorrect(self,",
"of file should be regenerated. \"\"\" if kind not in self.regenerate: kind =",
"that takes a dataframe as its single parameter and returns a vector of",
"function globally, to specify the function to use to display information while running",
"def assertStringCorrect(self, string, ref_csv, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check",
"\"\"\" Internal method for regenerating reference data for a list of files. \"\"\"",
"def set_default_data_location(self, location, kind=None): \"\"\" Declare the default filesystem location for reference files",
"globally, to control reporting of errors while running tests. Reference tests tend to",
"a file matches the contents from a reference text file. actual_path is a",
"location is used. This is the location declared for kind=None, which *must* be",
"of reference data that hasn't had its location defined explicitly, then the default",
"function should have the same signature as python's __future__ print function. If not",
"defaults will apply to all instances of ReferenceTest subsequently created. The following parameters",
"the actual output. This, obviously, should be used only after careful checking that",
"be initialized by set_default_data_location(). default_data_locations = {} @classmethod def set_defaults(cls, **kwargs): \"\"\" Set",
"index_col is None infer_datetime_format is True quotechar is \"\" quoting is csv.QUOTE_MINIMAL escapechar",
"are considered to be identical. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_result(string,",
"def assertFileCorrect(self, actual_path, ref_path, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check",
"absolute_import from __future__ import print_function from __future__ import division from __future__ import unicode_literals",
"PandasComparison from tdda.referencetest.checkfiles import FilesComparison # DEFAULT_FAIL_DIR is the default location for writing",
"reference data. \"\"\" with open(actual_path) as fin: actual = fin.read() self.write_reference_result(actual, reference_path) def",
"from a saved reference csv file. df Actual dataframe. ref_csv Name of reference",
"file matches a reference one. actual_paths List of Actual csv files. ref_csvs List",
"# in place. This can be overridden using the set_defaults() class method. DEFAULT_FAIL_DIR",
"parameters as the standard pandas pd.read_csv() function. check_data Option to specify fields to",
"taken place, so preprocessed versions of the files are written to this directory,",
"a vector of booleans (to specify which rows should be compared). precision Number",
"Option to specify fields to compare typees. check_order Option to specify fields to",
"actual dataset to use to check that there are no unexpected extra columns.",
"the reference output with the actual output. This, obviously, should be used only",
"can't check correctness, so it will raise an exception. \"\"\" self.reference_data_locations[kind] = location",
"df Actual dataframe. ref_csv Name of reference csv file. The location of the",
"and timestamps that vary in the output from run to run, but which",
"actual = fin.read() self.write_reference_result(actual, reference_path) def write_reference_files(self, actual_paths, reference_paths): \"\"\" Internal method for",
"compared). precision Number of decimal places to compare float values. loader Function to",
"file should be looked for, if it has been specified using a relative",
"results. \"\"\" with open(reference_path, 'w') as fout: fout.write(result) if self.verbose and self.print_fn: self.print_fn('Written",
"\"\"\" with open(actual_path) as fin: actual = fin.read() self.write_reference_result(actual, reference_path) def write_reference_files(self, actual_paths,",
"unicode_literals import os import sys import tempfile from tdda.referencetest.checkpandas import PandasComparison from tdda.referencetest.checkfiles",
"False Raises NotImplementedError if Pandas is not available. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind)",
"using a relative path. \"\"\" if self.reference_data_locations and not os.path.isabs(path): if kind not",
"be applied to datasets before comparing. It can be None, or can be",
"utf-8 -*- \"\"\" referencetest.py: refererence testing for test-driven data analysis. Source repository: http://github.com/tdda/tdda",
"not available. \"\"\" expected_paths = self.resolve_reference_paths(ref_csvs, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: r",
"be used for unstructured data such as logfiles, etc. For csv files, use",
"assertCSVFileCorrect instead. \"\"\" expected_paths = self.resolve_reference_paths(ref_paths, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: mpc",
"reference data file should be looked for, if it has been specified using",
"ReferenceTest. The functionality is also available through the pytest framework, via the referencepytest",
"flag verbose = True # Temporary directory tmp_dir = DEFAULT_FAIL_DIR # Dictionary describing",
"\"\"\" cls.regenerate[kind] = regenerate @classmethod def set_default_data_location(self, location, kind=None): \"\"\" Declare the default",
"k) @classmethod def set_regeneration(cls, kind=None, regenerate=True): \"\"\" Set the regeneration flag for a",
"Optional parameter, giving path for file where actual dataframe originated, used for error",
"it will raise an exception. \"\"\" self.reference_data_locations[kind] = location def assertDatasetsEqual(self, df, ref_df,",
"booleans (to specify which rows should be compared). precision Number of decimal places",
"expected_path) else: ref_df = self.pandas.load_csv(expected_path, loader=csv_read_fn) self.assertDatasetsEqual(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order,",
"default location for writing failing output # if assertStringCorrect or assertFileCorrect fail with",
"actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) def assertCSVFileCorrect(self, actual_path, ref_csv, kind='csv',",
"location globally, and will affect all instances of the ReferenceTest class subsequently created.",
"Pandas is not available. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path)",
"top-level functions, # to work will with frameworks like pytest. ReferenceTest.__all__ = dir(ReferenceTest)",
"a particular kind of reference file, globally, for all instances of the ReferenceTest",
"rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def resolve_reference_path(self,",
"either because the previous output was in fact wrong, or because the intended",
"out. Note: the stripping on a per-line basis. rstrip if set to true,",
"file to obtain a pandas dataframe. If None, then a default csv loader",
"be compared). precision Number of decimal places to compare float values. loader Function",
"regeneration flag for a particular kind of reference file, globally, for all instances",
"of the class. If calls to assertFileCorrect() (etc) are made for kinds of",
"be a function that takes a dataframe as its single parameter and returns",
"used which writes unbuffered to sys.stdout. tmp_dir Sets the tmp_dir property globally, to",
"file where expected dataframe originated, used for error messages. check_data Option to specify",
"and the number of such permutations does not exceed this limit, then the",
"should be regenerated. \"\"\" if kind not in self.regenerate: kind = None return",
"class, which is a subclass of, and drop-in replacement for unittest.TestCase. It extends",
"(failures, msgs) = r self.check_failures(failures, msgs) def assertDatasetCorrect(self, df, ref_csv, actual_path=None, kind='csv', csv_read_fn=None,",
"maximum number of permutations allowed; if the actual and expected lists differ only",
"timestamps that vary in the output from run to run, but which do",
"for the full report at the end. Verbose is set to True by",
"where actual dataframe originated, used for error messages. kind Reference kind, used to",
"kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that a",
"the reference files is determined by the configuration via set_data_location(). kind Reference kind,",
"the reference kind, used to locate the reference files. lstrip if set to",
"location def __init__(self, assert_fn): \"\"\" Initializer for a ReferenceTest instance. assert_fn Function to",
"which kinds of reference files should be # regenerated when the tests are",
"of regular expressions; lines will be considered to be the same if they",
"The location of the reference file is determined by the configuration via set_data_location().",
"the methods from ReferenceTest. The functionality is also available through the pytest framework,",
"# each kind. Can be initialized by set_default_data_location(). default_data_locations = {} @classmethod def",
"substrings; lines containing any of these substrings will be ignored in the comparison.",
"reference text file. string is the actual string. ref_csv is the name of",
"ref_paths is a list of names of the matching reference files. The location",
"an optional function that takes a list of strings and preprocesses it in",
"msgs): \"\"\" Internal method for check for failures and reporting them. \"\"\" self.assert_fn(failures",
"reference data from in-memory results. \"\"\" with open(reference_path, 'w') as fout: fout.write(result) if",
"dataframe originated, used for error messages. check_data Option to specify fields to compare",
"exceed this limit, then the two are considered to be identical. \"\"\" expected_path",
"subsequently created. The following parameters can be set: verbose Sets the boolean verbose",
"actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures,",
"for ignoring lines within the strings/files that contain particular patterns or regular expressions.",
"do not indicate a problem. - There is support for re-writing the reference",
"If the regenerate flag is set to True, then the framework will regenerate",
"= self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: ref_df = self.pandas.load_csv(expected_path, loader=csv_read_fn) self.assertDatasetsEqual(df,",
"declared for kind=None, which *must* be specified. If you haven't even defined the",
"by default. \"\"\" cls.regenerate[kind] = regenerate @classmethod def set_default_data_location(self, location, kind=None): \"\"\" Declare",
"pass) - a string (to report details of how a test failed, if",
"msgs) def assertCSVFilesCorrect(self, actual_paths, ref_csvs, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None,",
"a saved reference csv file. df Actual dataframe. ref_csv Name of reference csv",
"does not exceed this limit, then the two are considered to be identical.",
"should be set using the # set_regeneration() class-method. Can be initialized via the",
"correctness, so it will raise an exception. \"\"\" self.reference_data_locations[kind] = location def assertDatasetsEqual(self,",
"via set_data_location(). kind is the reference kind, used to locate the reference files.",
"msgs) = r self.check_failures(failures, msgs) def assertDatasetCorrect(self, df, ref_csv, actual_path=None, kind='csv', csv_read_fn=None, check_data=None,",
"features are: - If the comparison between a string and a file fails,",
"support for ignoring lines within the strings/files that contain particular patterns or regular",
"an exception. \"\"\" self.reference_data_locations[kind] = location def assertDatasetsEqual(self, df, ref_df, actual_path=None, expected_path=None, check_data=None,",
"framework, via the referencepytest module. This module provides all of the methods from",
"can be None, or can be a function that takes a dataframe as",
"contents from a reference text file. string is the actual string. ref_csv is",
"that contain particular patterns or regular expressions. This is typically useful for filtering",
"\"\"\" Set default parameters, at the class level. These defaults will apply to",
"pytest suite. \"\"\" # Verbose flag verbose = True # Temporary directory tmp_dir",
"None infer_datetime_format is True quotechar is \"\" quoting is csv.QUOTE_MINIMAL escapechar is \\",
"of comparison to all fields) - False (to skip that kind of comparison",
"__init__. \"\"\" print(*args, **kwargs) outfile = kwargs.get('file', sys.stdout) outfile.flush() # Default print function",
"csv.QUOTE_MINIMAL escapechar is \\ na_values are the empty string, NaN, and NULL keep_default_na",
"new output is correct, either because the previous output was in fact wrong,",
"regenerating reference data from in-memory results. \"\"\" with open(reference_path, 'w') as fout: fout.write(result)",
"results against saved \"known to be correct\" reference results. This is typically useful",
"is not available. \"\"\" expected_paths = self.resolve_reference_paths(ref_csvs, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else:",
"file. csv_read_fn A function to use to read a csv file to obtain",
"not indicate a problem. - There is support for re-writing the reference output",
"all instances of the ReferenceTest class subsequently created. The instance method set_data_location() can",
"None, or can be a function that takes a dataframe as its single",
"function that takes a list of strings and preprocesses it in some way;",
"to take longer to run than traditional unit tests, so it is often",
"a module, # so that all of its methods can be made available",
"of the reference file. The location of the reference file is determined by",
"self.assert_fn = assert_fn self.reference_data_locations = dict(self.default_data_locations) self.pandas = PandasComparison(print_fn=self.print_fn, verbose=self.verbose) self.files = FilesComparison(print_fn=self.print_fn,",
"locate the reference files. lstrip if set to true, both strings are left",
"so it will raise an exception. \"\"\" self.reference_data_locations[kind] = location def assertDatasetsEqual(self, df,",
"k in kwargs: if k == 'verbose': cls.verbose = kwargs[k] elif k ==",
"If None, then a default csv loader is used. The check_* comparison flags",
"only differ in substrings that match one of these regular expressions. The expressions",
"tempfile.gettempdir() returns, as appropriate. \"\"\" for k in kwargs: if k == 'verbose':",
"to true, both strings are left stripped before the comparison is carried out.",
"lstrip if set to true, both strings are left stripped before the comparison",
"kind, used to locate the reference csv file. check_data Option to specify fields",
"set to true, both strings are left stripped before the comparison is carried",
"file fails, the actual string is written to a file and a diff",
"infer_datetime_format is True quotechar is \"\" quoting is csv.QUOTE_MINIMAL escapechar is \\ na_values",
"preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def assertFilesCorrect(self, actual_paths, ref_paths, kind=None,",
"suite. \"\"\" # Verbose flag verbose = True # Temporary directory tmp_dir =",
"subsequently created. The instance method set_data_location() can be used to set the per-kind",
"expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs)",
"of field names to use. Raises NotImplementedError if Pandas is not available. \"\"\"",
"for a ReferenceTest instance. assert_fn Function to be used to make assertions for",
"a pytest suite. \"\"\" # Verbose flag verbose = True # Temporary directory",
"print function, but you can override it by passing in a print_fn parameter",
"the csv_read_fn function. The check_* comparison flags can be of any of the",
"of ReferenceTestCase as a superclass. If calls to assertFileCorrect() (etc) are made for",
"analysis. Source repository: http://github.com/tdda/tdda License: MIT Copyright (c) Stochastic Solutions Limited 2016 \"\"\"",
"which is a subclass of, and drop-in replacement for unittest.TestCase. It extends that",
"are included in the failure messages. If not explicitly set by set_defaults(), the",
"by the configuration via set_data_location(). kind is the reference kind, used to locate",
"actual output. This, obviously, should be used only after careful checking that the",
"kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: r = self.pandas.check_csv_files(actual_paths, expected_paths, check_types=check_types, check_order=check_order, condition=condition,",
"It's useful to be able to see the contents of the files after",
"full report at the end. Verbose is set to True by default. print_fn",
"global defaults set from calls to the set_default_data_location class-method. If you haven't even",
"can be a function that takes a dataframe as its single parameter and",
"determined by the configuration via set_data_location(). kind is the reference kind, used to",
"if a particular kind of file should be regenerated. \"\"\" if kind not",
"it can't check correctness, so it will raise an exception. \"\"\" self.default_data_locations[kind] =",
"extends that class with all of the methods from ReferenceTest. The functionality is",
"self.pandas.load_csv(expected_path, loader=csv_read_fn) self.assertDatasetsEqual(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) def",
"initialized via the -w option. regenerate = {} # Dictionary describing default location",
"for failures and reporting them. \"\"\" self.assert_fn(failures == 0, '\\n'.join(msgs)) @staticmethod def default_print_fn(*args,",
"kind=kind) if self.should_regenerate(kind): self.write_reference_result(string, expected_path) else: ilc = ignore_substrings ip = ignore_patterns mpc",
"Temporary directory tmp_dir = DEFAULT_FAIL_DIR # Dictionary describing which kinds of reference files",
"location for reference data, for # each kind. Can be initialized by set_default_data_location().",
"self.write_reference_files(actual_paths, expected_paths) else: mpc = max_permutation_cases r = self.files.check_files(actual_paths, expected_paths, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings,",
"as logfiles, etc. For csv files, use assertCSVFileCorrect instead. \"\"\" expected_path = self.resolve_reference_path(ref_path,",
"the print function globally, to specify the function to use to display information",
"is False Raises NotImplementedError if Pandas is not available. \"\"\" expected_path = self.resolve_reference_path(ref_csv,",
"does not evaluate as true). \"\"\" self.assert_fn = assert_fn self.reference_data_locations = dict(self.default_data_locations) self.pandas",
"module provides all of the methods from ReferenceTest, as functions that can be",
"ip = ignore_patterns mpc = max_permutation_cases r = self.files.check_string_against_file(string, expected_path, actual_path=None, lstrip=lstrip, rstrip=rstrip,",
"self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: r = self.pandas.check_csv_file(actual_path, expected_path, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision)",
"text file check fails and a 'preprocess' function has been specified. It's useful",
"as fout: fout.write(result) if self.verbose and self.print_fn: self.print_fn('Written %s' % reference_path) def check_failures(self,",
"be of any of the following: - None (to apply that kind of",
"so preprocessed versions of the files are written to this directory, and their",
"of a particular kind. Typically you would subclass ReferenceTestCase and pass in these",
"columns. sortby Option to specify fields to sort by before comparing. condition Filter",
"The function should have the same signature as python's __future__ print function. If",
"for writing failing output # if assertStringCorrect or assertFileCorrect fail with 'preprocessing' #",
"of such permutations does not exceed this limit, then the two are considered",
"check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def",
"data that hasn't had its location defined explicitly, then the default location is",
"__init__ method when constructing an instance of ReferenceTestCase as a superclass. If calls",
"one. actual_paths List of Actual csv files. ref_csvs List of names of matching",
"default, it will use this print function, but you can override it by",
"needs to print messages. By default, it will use this print function, but",
"name of the reference csv file. The location of the reference file is",
"raise an exception. \"\"\" self.default_data_locations[kind] = location def __init__(self, assert_fn): \"\"\" Initializer for",
"in-memory reference one. df Actual dataframe. ref_df Expected dataframe. actual_path Optional parameter, giving",
"way; this function will be applied to both the actual and expected. max_permutation_cases",
"set using the # set_regeneration() class-method. Can be initialized via the -w option.",
"for kind=None, which *must* be specified. This method overrides any global defaults set",
"to be correct\" reference results. This is typically useful when software produces either",
"Internal method for check for failures and reporting them. \"\"\" self.assert_fn(failures == 0,",
"should be compared). precision Number of decimal places to compare float values. **kwargs",
"used for error messages. check_data Option to specify fields to compare values. check_types",
"\"\"\" Initializer for a ReferenceTest instance. assert_fn Function to be used to make",
"particular kind of file should be regenerated. \"\"\" if kind not in self.regenerate:",
"is the name of the reference file. The location of the reference file",
"pandas pd.read_csv(), with default options as follows: index_col is None infer_datetime_format is True",
"checking that the new output is correct, either because the previous output was",
"for file where actual dataframe originated, used for error messages. expected_path Optional parameter,",
"Verbose is set to True by default. print_fn Sets the print function globally,",
"set the per-kind data locations for an individual instance of the class. If",
"Magic so that an instance of this class can masquerade as a module,",
"{} @classmethod def set_defaults(cls, **kwargs): \"\"\" Set default parameters, at the class level.",
"dataframe. If None, then a default csv loader is used, which takes the",
"skip that kind of comparison completely) - a list of field names -",
"It should take two parameters: - a value (which should evaluate as true",
"the None default, and you make calls to assertFileCorrect() (etc) using relative pathnames",
"Internal method for regenerating reference data for a list of files. \"\"\" for",
"check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that a csv file matches",
"identical. This should be used for unstructured data such as logfiles, etc. For",
"precision Number of decimal places to compare float values. The check_* comparison flags",
"use to check that there are no unexpected extra columns. sortby Option to",
"elif k == 'tmp_dir': cls.tmp_dir = kwargs[k] else: raise Exception('set_defaults: Unrecogized option %s'",
"boolean verbose flag globally, to control reporting of errors while running tests. Reference",
"the location globally, and will affect all instances of the ReferenceTest class subsequently",
"class level. These defaults will apply to all instances of ReferenceTest subsequently created.",
"optional number specifying the maximum number of permutations allowed; if the actual and",
"regenerate=True): \"\"\" Set the regeneration flag for a particular kind of reference file,",
"is False Raises NotImplementedError if Pandas is not available. \"\"\" expected_paths = self.resolve_reference_paths(ref_csvs,",
"(to skip that kind of comparison completely) - a list of field names",
"option %s' % k) @classmethod def set_regeneration(cls, kind=None, regenerate=True): \"\"\" Set the regeneration",
"parameters: - a value (which should evaluate as true for the test to",
"p in paths] def should_regenerate(self, kind): \"\"\" Internal method to determine if a",
"is not defined, it defaults to /tmp, c:\\temp or whatever tempfile.gettempdir() returns, as",
"dict(self.default_data_locations) self.pandas = PandasComparison(print_fn=self.print_fn, verbose=self.verbose) self.files = FilesComparison(print_fn=self.print_fn, verbose=self.verbose, tmp_dir=self.tmp_dir) def set_data_location(self, location,",
"if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: r = self.pandas.check_csv_file(actual_path, expected_path, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby,",
"= ignore_patterns mpc = max_permutation_cases r = self.files.check_string_against_file(string, expected_path, actual_path=None, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ilc,",
"substrings will be ignored in the comparison. ignore_patterns is an optional list of",
"drop-in replacement for unittest.TestCase. It extends that class with all of the methods",
"if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: ref_df = self.pandas.load_csv(expected_path, loader=csv_read_fn) self.assertDatasetsEqual(df, ref_df, actual_path=actual_path, expected_path=expected_path,",
"ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def resolve_reference_path(self, path,",
"the reference file. The location of the reference file is determined by the",
"particular kind. Typically you would subclass ReferenceTestCase and pass in these locations though",
"if they need refer to the whole line. preprocess is an optional function",
"expected_path, actual_path=None, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ilc, ignore_patterns=ip, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures,",
"to be able to see the contents of the files after preprocessing has",
"= self.files.check_files(actual_paths, expected_paths, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r",
"explicitly set by set_defaults(), the environment variable TDDA_FAIL_DIR is used, or, if that",
"ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a collection of files matche the",
"a path for a text file. ref_path is the name of the reference",
"r self.check_failures(failures, msgs) def assertCSVFilesCorrect(self, actual_paths, ref_csvs, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None,",
"using the set_defaults() class method. DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir()) class ReferenceTest(object): \"\"\" Class",
"for \"%s\"' % kind) return path def resolve_reference_paths(self, paths, kind=None): \"\"\" Internal method",
"= self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: r = self.pandas.check_csv_file(actual_path, expected_path, check_types=check_types,",
"data, for # each kind. Can be initialized by set_default_data_location(). default_data_locations = {}",
"in the comparison. ignore_patterns is an optional list of regular expressions; lines will",
"ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a file matches the contents from",
"- If the comparison between a string and a file fails, the actual",
"self.print_fn('Written %s' % reference_path) def check_failures(self, failures, msgs): \"\"\" Internal method for check",
"ilc = ignore_substrings ip = ignore_patterns mpc = max_permutation_cases r = self.files.check_string_against_file(string, expected_path,",
"would subclass ReferenceTestCase and pass in these locations though its __init__ method when",
"completely) - a list of field names - a function taking a dataframe",
"the reference kind, used to locate the reference csv file. lstrip if set",
"actual and expected lists differ only in that their lines are permutations of",
"running tests. The function should have the same signature as python's __future__ print",
"= True # Temporary directory tmp_dir = DEFAULT_FAIL_DIR # Dictionary describing which kinds",
"\"\"\" for k in kwargs: if k == 'verbose': cls.verbose = kwargs[k] elif",
"expected dataframe originated, used for error messages. check_data Option to specify fields to",
"to specify fields to compare typees. check_order Option to specify fields to compare",
"of booleans (to specify which rows should be compared). precision Number of decimal",
"kind): \"\"\" Internal method to determine if a particular kind of file should",
"expected_path=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None): \"\"\" Check that an in-memory Pandas",
"one from a saved reference csv file. df Actual dataframe. ref_csv Name of",
"the contents from a reference text file. string is the actual string. ref_csv",
"return kind in self.regenerate and self.regenerate[kind] def write_reference_file(self, actual_path, reference_path): \"\"\" Internal method",
"unittest framework, via the referencetestcase module. This provides the ReferenceTestCase class, which is",
"csv file. lstrip if set to true, both strings are left stripped before",
"for a particular kind of reference file, globally, for all instances of the",
"\"\"\" from __future__ import absolute_import from __future__ import print_function from __future__ import division",
"containing any of these substrings will be ignored in the comparison. ignore_patterns is",
"actual_path=None, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that",
"Class for comparing results against saved \"known to be correct\" reference results. This",
"assertFilesCorrect(self, actual_paths, ref_paths, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that",
"pandas pd.read_csv() function. check_data Option to specify fields to compare values. check_types Option",
"csv file matches a reference one. actual_paths List of Actual csv files. ref_csvs",
"condition=None, sortby=None, precision=None): \"\"\" Check that an in-memory Pandas dataframe matches an in-memory",
"a problem. - There is support for re-writing the reference output with the",
"the regenerate flag is set to True, then the framework will regenerate reference",
"None, then a default csv loader is used, which takes the same parameters",
"lines containing any of these substrings will be ignored in the comparison. ignore_patterns",
"False Raises NotImplementedError if Pandas is not available. \"\"\" expected_paths = self.resolve_reference_paths(ref_csvs, kind=kind)",
"% kind) return path def resolve_reference_paths(self, paths, kind=None): \"\"\" Internal method for resolving",
"dataframe matches an in-memory reference one. df Actual dataframe. ref_df Expected dataframe. actual_path",
"typically useful when software produces either a (text or csv) file or a",
"dataframe originated, used for error messages. expected_path Optional parameter, giving path for file",
"set_regeneration(cls, kind=None, regenerate=True): \"\"\" Set the regeneration flag for a particular kind of",
"a test failed, if the value does not evaluate as true). \"\"\" self.assert_fn",
"reference_paths): self.write_reference_file(actual_path, reference_path) def write_reference_result(self, result, reference_path): \"\"\" Internal method for regenerating reference",
"signature as python's __future__ print function. If not specified, a default print function",
"ignore_patterns mpc = max_permutation_cases r = self.files.check_string_against_file(string, expected_path, actual_path=None, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ilc, ignore_patterns=ip,",
"logfiles, etc. For csv files, use assertCSVFileCorrect instead. \"\"\" expected_path = self.resolve_reference_path(ref_path, kind=kind)",
"available. \"\"\" expected_paths = self.resolve_reference_paths(ref_csvs, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: r =",
"in self.regenerate: kind = None return kind in self.regenerate and self.regenerate[kind] def write_reference_file(self,",
"will be ignored in the comparison. ignore_patterns is an optional list of regular",
"produces either a (text or csv) file or a string as output. The",
"= self.pandas.check_csv_files(actual_paths, expected_paths, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures,",
"as a superclass. If calls to assertFileCorrect() (etc) are made for kinds of",
"function to use to read a csv file to obtain a pandas dataframe.",
"set_regeneration() class-method. Can be initialized via the -w option. regenerate = {} #",
"= self.resolve_reference_paths(ref_csvs, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: r = self.pandas.check_csv_files(actual_paths, expected_paths, check_types=check_types,",
"directory, and their pathnames are included in the failure messages. If not explicitly",
"be overridden using the set_defaults() class method. DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir()) class ReferenceTest(object):",
"= kwargs[k] elif k == 'print_fn': cls.print_fn = kwargs[k] elif k == 'tmp_dir':",
"csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that a csv",
"a list of files. \"\"\" for (actual_path, expected_path) in zip(actual_paths, reference_paths): self.write_reference_file(actual_path, reference_path)",
"Solutions Limited 2016 \"\"\" from __future__ import absolute_import from __future__ import print_function from",
"as true). \"\"\" self.assert_fn = assert_fn self.reference_data_locations = dict(self.default_data_locations) self.pandas = PandasComparison(print_fn=self.print_fn, verbose=self.verbose)",
"and the expected output. - There is support for ignoring lines within the",
"regular expressions; lines will be considered to be the same if they only",
"comparison completely) - a list of field names - a function taking a",
"should be compared). precision Number of decimal places to compare float values. The",
"regenerate = {} # Dictionary describing default location for reference data, for #",
"expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: r = self.pandas.check_csv_file(actual_path, expected_path,",
"k == 'tmp_dir': cls.tmp_dir = kwargs[k] else: raise Exception('set_defaults: Unrecogized option %s' %",
"the actual string is written to a file and a diff command is",
"be made available as top-level functions, # to work will with frameworks like",
"are: - If the comparison between a string and a file fails, the",
"be used only after careful checking that the new output is correct, either",
"directory where temporary files are written. Temporary files are created whenever a text",
"be the same if they only differ in substrings that match one of",
"files, use assertCSVFileCorrect instead. \"\"\" expected_path = self.resolve_reference_path(ref_path, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path)",
"Can be initialized by set_default_data_location(). default_data_locations = {} @classmethod def set_defaults(cls, **kwargs): \"\"\"",
"will regenerate reference data of that kind, rather than comparing. All of the",
"assert_fn self.reference_data_locations = dict(self.default_data_locations) self.pandas = PandasComparison(print_fn=self.print_fn, verbose=self.verbose) self.files = FilesComparison(print_fn=self.print_fn, verbose=self.verbose, tmp_dir=self.tmp_dir)",
"passed straight through to the csv_read_fn function. The check_* comparison flags can be",
"then the two are considered to be identical. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind)",
"preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def resolve_reference_path(self, path, kind=None): \"\"\"",
"dataframe as its single parameter and returns a vector of booleans (to specify",
"(failures, msgs) = r self.check_failures(failures, msgs) def assertCSVFilesCorrect(self, actual_paths, ref_csvs, kind='csv', csv_read_fn=None, check_data=None,",
"specify which rows should be compared). precision Number of decimal places to compare",
"loader=csv_read_fn) self.assertDatasetsEqual(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) def assertCSVFileCorrect(self,",
"on a per-line basis. ignore_substrings is an optional list of substrings; lines containing",
"expected_path) else: mpc = max_permutation_cases r = self.files.check_file(actual_path, expected_path, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns,",
"set to True, then the framework will regenerate reference data of that kind,",
"precision=None, **kwargs): \"\"\" Check that a csv file matches a reference one. actual_path",
"Limited 2016 \"\"\" from __future__ import absolute_import from __future__ import print_function from __future__",
"\"\"\" Check that a collection of files matche the contents from matching collection",
"regular expressions. This is typically useful for filtering out things like version numbers",
"then the two are considered to be identical. This should be used for",
"check correctness, so it will raise an exception. \"\"\" self.default_data_locations[kind] = location def",
"and reporting them. \"\"\" self.assert_fn(failures == 0, '\\n'.join(msgs)) @staticmethod def default_print_fn(*args, **kwargs): \"\"\"",
"to compare field order. check_extra_cols Option to specify fields in the actual dataset",
"function. check_data Option to specify fields to compare values. check_types Option to specify",
"text files. ref_paths is a list of names of the matching reference files.",
"this print function, but you can override it by passing in a print_fn",
"**kwargs): \"\"\" Check that an in-memory Pandas dataset matches a reference one from",
"matching collection of reference text files. actual_paths is a list of paths for",
"of the files are written to this directory, and their pathnames are included",
"the ReferenceTestCase class, which is a subclass of, and drop-in replacement for unittest.TestCase.",
"Dictionary describing default location for reference data, for # each kind. Can be",
"originated, used for error messages. check_data Option to specify fields to compare values.",
"looked for, if it has been specified using a relative path. \"\"\" if",
"replacement for unittest.TestCase. It extends that class with all of the methods from",
"to be able to see information from failing tests as they happen, rather",
"sys.stdout) outfile.flush() # Default print function print_fn = default_print_fn # Magic so that",
"output. The main features are: - If the comparison between a string and",
"for re-writing the reference output with the actual output. This, obviously, should be",
"additional named parameters are passed straight through to the csv_read_fn function. The check_*",
"string as output. The main features are: - If the comparison between a",
"raise an exception. \"\"\" self.reference_data_locations[kind] = location def assertDatasetsEqual(self, df, ref_df, actual_path=None, expected_path=None,",
"fields to compare values. check_types Option to specify fields to compare typees. check_order",
"that the new output is correct, either because the previous output was in",
"that can be called directly as part of a pytest suite. \"\"\" #",
"of the same kind. \"\"\" return [self.resolve_reference_path(p, kind=kind) for p in paths] def",
"specified using a relative path. \"\"\" if self.reference_data_locations and not os.path.isabs(path): if kind",
"print function. If not specified, a default print function is used which writes",
"the stripping on a per-line basis. ignore_substrings is an optional list of substrings;",
"parameters are passed straight through to the csv_read_fn function. The check_* comparison flags",
"a file fails, the actual string is written to a file and a",
"parenthesised groups, and should only include explicit anchors if they need refer to",
"check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that an in-memory Pandas dataset",
"data locations for an individual instance of the class. If calls to assertFileCorrect()",
"check fails and a 'preprocess' function has been specified. It's useful to be",
"write_reference_result(self, result, reference_path): \"\"\" Internal method for regenerating reference data from in-memory results.",
"Internal method for regenerating reference data from in-memory results. \"\"\" with open(reference_path, 'w')",
"test failed, if the value does not evaluate as true). \"\"\" self.assert_fn =",
"ref_path is the name of the reference file. The location of the reference",
"a particular kind. Typically you would subclass ReferenceTestCase and pass in these locations",
"flag globally, to control reporting of errors while running tests. Reference tests tend",
"the # set_regeneration() class-method. Can be initialized via the -w option. regenerate =",
"it in some way; this function will be applied to both the actual",
"sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertStringCorrect(self, string, ref_csv, kind=None,",
"self.regenerate and self.regenerate[kind] def write_reference_file(self, actual_path, reference_path): \"\"\" Internal method for regenerating reference",
"expressions must not contain parenthesised groups, and should only include explicit anchors if",
"included in the failure messages. If not explicitly set by set_defaults(), the environment",
"set by set_defaults(), the environment variable TDDA_FAIL_DIR is used, or, if that is",
"all instances of the ReferenceTest class. If the regenerate flag is set to",
"determine if a particular kind of file should be regenerated. \"\"\" if kind",
"If None, then a default csv loader is used, which takes the same",
"== 'print_fn': cls.print_fn = kwargs[k] elif k == 'tmp_dir': cls.tmp_dir = kwargs[k] else:",
"pd.read_csv(), with default options as follows: index_col is None infer_datetime_format is True quotechar",
"control reporting of errors while running tests. Reference tests tend to take longer",
"refer to the whole line. preprocess is an optional function that takes a",
"check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertStringCorrect(self, string,",
"appropriate. \"\"\" for k in kwargs: if k == 'verbose': cls.verbose = kwargs[k]",
"output # if assertStringCorrect or assertFileCorrect fail with 'preprocessing' # in place. This",
"= location def __init__(self, assert_fn): \"\"\" Initializer for a ReferenceTest instance. assert_fn Function",
"precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertDatasetCorrect(self, df, ref_csv, actual_path=None, kind='csv',",
"an in-memory reference one. df Actual dataframe. ref_df Expected dataframe. actual_path Optional parameter,",
"should be used only after careful checking that the new output is correct,",
"of each other, and the number of such permutations does not exceed this",
"specified. It's useful to be able to see the contents of the files",
"instance. assert_fn Function to be used to make assertions for unit-tests. It should",
"method set_data_location() can be used to set the per-kind data locations for an",
"the reference csv file. lstrip if set to true, both strings are left",
"framework needs to print messages. By default, it will use this print function,",
"from __future__ import division from __future__ import unicode_literals import os import sys import",
"the referencetestcase module. This provides the ReferenceTestCase class, which is a subclass of,",
"can be made available as top-level functions, # to work will with frameworks",
"files is determined by the configuration via set_data_location(). kind Reference kind, used to",
"check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertDatasetCorrect(self,",
"of these substrings will be ignored in the comparison. ignore_patterns is an optional",
"list of regular expressions; lines will be considered to be the same if",
"has been specified using a relative path. \"\"\" if self.reference_data_locations and not os.path.isabs(path):",
"in kwargs: if k == 'verbose': cls.verbose = kwargs[k] elif k == 'print_fn':",
"reference_path): \"\"\" Internal method for regenerating reference data. \"\"\" with open(actual_path) as fin:",
"configuration via set_data_location(). actual_path Optional parameter, giving path for file where actual dataframe",
"tempfile.gettempdir()) class ReferenceTest(object): \"\"\" Class for comparing results against saved \"known to be",
"\"\"\" expected_paths = self.resolve_reference_paths(ref_csvs, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: r = self.pandas.check_csv_files(actual_paths,",
"is suggested for seeing the differences between the actual output and the expected",
"- None (to apply that kind of comparison to all fields) - False",
"cls.tmp_dir = kwargs[k] else: raise Exception('set_defaults: Unrecogized option %s' % k) @classmethod def",
"is a list of names of the matching reference files. The location of",
"a 'preprocess' function has been specified. It's useful to be able to see",
"the actual output and the expected output. - There is support for ignoring",
"the new output is correct, either because the previous output was in fact",
"It can be None, or can be a function that takes a dataframe",
"class with all of the methods from ReferenceTest. The functionality is also available",
"can be called directly as part of a pytest suite. \"\"\" # Verbose",
"via the referencepytest module. This module provides all of the methods from ReferenceTest,",
"versions of the files are written to this directory, and their pathnames are",
"- a function taking a dataframe as its single parameter, and returning a",
"reporting them. \"\"\" self.assert_fn(failures == 0, '\\n'.join(msgs)) @staticmethod def default_print_fn(*args, **kwargs): \"\"\" Sometimes",
"compare float values. loader Function to use to read a csv file to",
"not available. \"\"\" r = self.pandas.check_dataframe(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition,",
"msgs) = r self.check_failures(failures, msgs) def assertStringCorrect(self, string, ref_csv, kind=None, lstrip=False, rstrip=False, ignore_substrings=None,",
"hasn't had its location defined explicitly, then the default location is used. This",
"be set using the # set_regeneration() class-method. Can be initialized via the -w",
"loader is used, which takes the same parameters as the standard pandas pd.read_csv()",
"then the framework will regenerate reference data of that kind, rather than comparing.",
"= r self.check_failures(failures, msgs) def assertFileCorrect(self, actual_path, ref_path, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None,",
"of names of the matching reference files. The location of the reference files",
"is an optional number specifying the maximum number of permutations allowed; if the",
"exceed this limit, then the two are considered to be identical. This should",
"before the comparison is carried out. Note: the stripping on a per-line basis.",
"level. These defaults will apply to all instances of ReferenceTest subsequently created. The",
"is the default location for writing failing output # if assertStringCorrect or assertFileCorrect",
"\"\"\" Check that an in-memory Pandas dataset matches a reference one from a",
"via set_data_location(). kind Reference kind, used to locate the reference csv file. csv_read_fn",
"whole line. preprocess is an optional function that takes a list of strings",
"should take two parameters: - a value (which should evaluate as true for",
"= self.pandas.check_csv_file(actual_path, expected_path, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures,",
"its single parameter, and returning a list of field names to use. The",
"sortby=None, precision=None, **kwargs): \"\"\" Check that an in-memory Pandas dataset matches a reference",
"by passing in a print_fn parameter to __init__. \"\"\" print(*args, **kwargs) outfile =",
"regenerating reference data. \"\"\" with open(actual_path) as fin: actual = fin.read() self.write_reference_result(actual, reference_path)",
"csv files, use assertCSVFileCorrect instead. \"\"\" expected_paths = self.resolve_reference_paths(ref_paths, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths,",
"configuration via set_data_location(). kind Reference kind, used to locate the reference csv files.",
"and expected. max_permutation_cases is an optional number specifying the maximum number of permutations",
"particular kind. This sets the location globally, and will affect all instances of",
"file. The location of the reference file is determined by the configuration via",
"for unittest.TestCase. It extends that class with all of the methods from ReferenceTest.",
"for regenerating reference data from in-memory results. \"\"\" with open(reference_path, 'w') as fout:",
"kind, rather than comparing. All of the regenerate flags are set to False",
"= self.files.check_string_against_file(string, expected_path, actual_path=None, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ilc, ignore_patterns=ip, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) =",
"in-memory Pandas dataframe matches an in-memory reference one. df Actual dataframe. ref_df Expected",
"instance method set_data_location() can be used to set the per-kind data locations for",
"or csv) file or a string as output. The main features are: -",
"a text file check fails and a 'preprocess' function has been specified. It's",
"= {} @classmethod def set_defaults(cls, **kwargs): \"\"\" Set default parameters, at the class",
"files. ref_paths is a list of names of the matching reference files. The",
"of Actual csv files. ref_csvs List of names of matching reference csv file.",
"to use. Raises NotImplementedError if Pandas is not available. \"\"\" r = self.pandas.check_dataframe(df,",
"or, if that is not defined, it defaults to /tmp, c:\\temp or whatever",
"from ReferenceTest, as functions that can be called directly as part of a",
"as its single parameter and returns a vector of booleans (to specify which",
"Actual csv files. ref_csvs List of names of matching reference csv file. The",
"failure messages. If not explicitly set by set_defaults(), the environment variable TDDA_FAIL_DIR is",
"it has been specified using a relative path. \"\"\" if self.reference_data_locations and not",
"assertDatasetCorrect(self, df, ref_csv, actual_path=None, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs):",
"as top-level functions, # to work will with frameworks like pytest. ReferenceTest.__all__ =",
"returning a list of field names to use. The default csv loader function",
"fact wrong, or because the intended behaviour has changed. The functionality provided by",
"output. - There is support for ignoring lines within the strings/files that contain",
"is typically useful for filtering out things like version numbers and timestamps that",
"often useful to be able to see information from failing tests as they",
"function print_fn = default_print_fn # Magic so that an instance of this class",
"the differences between the actual output and the expected output. - There is",
"Source repository: http://github.com/tdda/tdda License: MIT Copyright (c) Stochastic Solutions Limited 2016 \"\"\" from",
"path for a text file. ref_path is the name of the reference file.",
"from tdda.referencetest.checkfiles import FilesComparison # DEFAULT_FAIL_DIR is the default location for writing failing",
"ref_df, actual_path=None, expected_path=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None): \"\"\" Check that an",
"The default csv loader function is a wrapper around pandas pd.read_csv(), with default",
"right stripped before the comparison is carried out. Note: the stripping on a",
"while running tests. Reference tests tend to take longer to run than traditional",
"max_permutation_cases r = self.files.check_file(actual_path, expected_path, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs)",
"sortby=None, precision=None, **kwargs): \"\"\" Check that a csv file matches a reference one.",
"This provides the ReferenceTestCase class, which is a subclass of, and drop-in replacement",
"Set default parameters, at the class level. These defaults will apply to all",
"Pandas dataset matches a reference one from a saved reference csv file. df",
"if self.verbose and self.print_fn: self.print_fn('Written %s' % reference_path) def check_failures(self, failures, msgs): \"\"\"",
"unit-tests. It should take two parameters: - a value (which should evaluate as",
"precision=None): \"\"\" Check that an in-memory Pandas dataframe matches an in-memory reference one.",
"r = self.files.check_file(actual_path, expected_path, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) =",
"instance of ReferenceTestCase as a superclass. If calls to assertFileCorrect() (etc) are made",
"of the methods from ReferenceTest, as functions that can be called directly as",
"Reference kind, used to locate the reference csv files. csv_read_fn A function to",
"using the # set_regeneration() class-method. Can be initialized via the -w option. regenerate",
"False by default. \"\"\" cls.regenerate[kind] = regenerate @classmethod def set_default_data_location(self, location, kind=None): \"\"\"",
"for text files. ref_paths is a list of names of the matching reference",
"function will be applied to both the actual and expected. max_permutation_cases is an",
"dataframe as its single parameter, and returning a list of field names to",
"via set_data_location(). actual_path Optional parameter, giving path for file where actual dataframe originated,",
"with all of the methods from ReferenceTest. The functionality is also available through",
"directory tmp_dir = DEFAULT_FAIL_DIR # Dictionary describing which kinds of reference files should",
"by default. print_fn Sets the print function globally, to specify the function to",
"because the intended behaviour has changed. The functionality provided by this class is",
"in the output from run to run, but which do not indicate a",
"via set_data_location(). kind is the reference kind, used to locate the reference csv",
"expected_path, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs)",
"useful when software produces either a (text or csv) file or a string",
"DEFAULT_FAIL_DIR # Dictionary describing which kinds of reference files should be # regenerated",
"\"\"\" Declare the filesystem location for reference files of a particular kind. Typically",
"path for file where actual dataframe originated, used for error messages. expected_path Optional",
"is not available. \"\"\" r = self.pandas.check_dataframe(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order,",
"methods from ReferenceTest. The functionality is also available through the pytest framework, via",
"matching reference csv file. The location of the reference files is determined by",
"not os.path.isabs(path): if kind not in self.reference_data_locations: kind = None if kind in",
"self.regenerate[kind] def write_reference_file(self, actual_path, reference_path): \"\"\" Internal method for regenerating reference data. \"\"\"",
"open(actual_path) as fin: actual = fin.read() self.write_reference_result(actual, reference_path) def write_reference_files(self, actual_paths, reference_paths): \"\"\"",
"set_defaults() class method. DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir()) class ReferenceTest(object): \"\"\" Class for comparing",
"preprocessing has taken place, so preprocessed versions of the files are written to",
"\"\"\" self.reference_data_locations[kind] = location def assertDatasetsEqual(self, df, ref_df, actual_path=None, expected_path=None, check_data=None, check_types=None, check_order=None,",
"can be overridden using the set_defaults() class method. DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir()) class",
"be able to see information from failing tests as they happen, rather than",
"exception. \"\"\" self.reference_data_locations[kind] = location def assertDatasetsEqual(self, df, ref_df, actual_path=None, expected_path=None, check_data=None, check_types=None,",
"reference data of that kind, rather than comparing. All of the regenerate flags",
"csv file. csv_read_fn A function to use to read a csv file to",
"csv_read_fn A function to use to read a csv file to obtain a",
"to locate the reference csv files. csv_read_fn A function to use to read",
"reference kind, used to locate the reference files. lstrip if set to true,",
"'\\n'.join(msgs)) @staticmethod def default_print_fn(*args, **kwargs): \"\"\" Sometimes the framework needs to print messages.",
"True # Temporary directory tmp_dir = DEFAULT_FAIL_DIR # Dictionary describing which kinds of",
"framework will regenerate reference data of that kind, rather than comparing. All of",
"not evaluate as true). \"\"\" self.assert_fn = assert_fn self.reference_data_locations = dict(self.default_data_locations) self.pandas =",
"support for re-writing the reference output with the actual output. This, obviously, should",
"than traditional unit tests, so it is often useful to be able to",
"calls to assertFileCorrect() (etc) are made for kinds of reference data that hasn't",
"if Pandas is not available. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path,",
"assertCSVFilesCorrect(self, actual_paths, ref_csvs, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\"",
"which *must* be specified. If you haven't even defined the None default, and",
"functions that can be called directly as part of a pytest suite. \"\"\"",
"actual_path, reference_path): \"\"\" Internal method for regenerating reference data. \"\"\" with open(actual_path) as",
"actual_paths, ref_paths, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a",
"Check that a collection of files matche the contents from matching collection of",
"which writes unbuffered to sys.stdout. tmp_dir Sets the tmp_dir property globally, to specify",
"self.pandas.check_csv_file(actual_path, expected_path, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs)",
"be ignored in the comparison. ignore_patterns is an optional list of regular expressions;",
"place. This can be overridden using the set_defaults() class method. DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR',",
"strings/files that contain particular patterns or regular expressions. This is typically useful for",
"to use to display information while running tests. The function should have the",
"DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir()) class ReferenceTest(object): \"\"\" Class for comparing results against saved",
"seeing the differences between the actual output and the expected output. - There",
"paths, kind=None): \"\"\" Internal method for resolving a list of reference data files,",
"is often useful to be able to see information from failing tests as",
"in self.regenerate and self.regenerate[kind] def write_reference_file(self, actual_path, reference_path): \"\"\" Internal method for regenerating",
"sortby Option to specify fields to sort by before comparing. condition Filter to",
"self.write_reference_file(actual_path, reference_path) def write_reference_result(self, result, reference_path): \"\"\" Internal method for regenerating reference data",
"kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that an",
"\"\"\" self.default_data_locations[kind] = location def __init__(self, assert_fn): \"\"\" Initializer for a ReferenceTest instance.",
"allowed; if the actual and expected lists differ only in that their lines",
"= self.resolve_reference_path(ref_path, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: mpc = max_permutation_cases r =",
"by before comparing. condition Filter to be applied to datasets before comparing. It",
"loader Function to use to read a csv file to obtain a pandas",
"matches an in-memory reference one. df Actual dataframe. ref_df Expected dataframe. actual_path Optional",
"kind not in self.reference_data_locations: kind = None if kind in self.reference_data_locations: path =",
"file where actual dataframe originated, used for error messages. kind Reference kind, used",
"rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a collection of files matche",
"Can be initialized via the -w option. regenerate = {} # Dictionary describing",
"can be set: verbose Sets the boolean verbose flag globally, to control reporting",
"location, kind=None): \"\"\" Declare the default filesystem location for reference files of a",
"changed. The functionality provided by this class is available through python's standard unittest",
"two parameters: - a value (which should evaluate as true for the test",
"it will raise an exception. \"\"\" self.default_data_locations[kind] = location def __init__(self, assert_fn): \"\"\"",
"def assertFilesCorrect(self, actual_paths, ref_paths, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check",
"or can be a function that takes a dataframe as its single parameter",
"and should only include explicit anchors if they need refer to the whole",
"is an optional list of substrings; lines containing any of these substrings will",
"output and the expected output. - There is support for ignoring lines within",
"use this print function, but you can override it by passing in a",
"lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a file matches the",
"from calls to the set_default_data_location class-method. If you haven't even defined the None",
"used, or, if that is not defined, it defaults to /tmp, c:\\temp or",
"If calls to assertFileCorrect() (etc) are made for kinds of reference data that",
"via set_data_location(). kind is the reference kind, used to locate the reference file.",
"actual string. ref_csv is the name of the reference csv file. The location",
"specify fields to compare field order. check_extra_cols Option to specify fields in the",
"Function to use to read a csv file to obtain a pandas dataframe.",
"permutations does not exceed this limit, then the two are considered to be",
"places to compare float values. The check_* comparison flags can be of any",
"the ReferenceTest class. If the regenerate flag is set to True, then the",
"are set to False by default. \"\"\" cls.regenerate[kind] = regenerate @classmethod def set_default_data_location(self,",
"so that all of its methods can be made available as top-level functions,",
"to False by default. \"\"\" cls.regenerate[kind] = regenerate @classmethod def set_default_data_location(self, location, kind=None):",
"are permutations of each other, and the number of such permutations does not",
"reference one from a saved reference csv file. df Actual dataframe. ref_csv Name",
"\"\"\" Check that an in-memory Pandas dataframe matches an in-memory reference one. df",
"this class is available through python's standard unittest framework, via the referencetestcase module.",
"empty string, NaN, and NULL keep_default_na is False Raises NotImplementedError if Pandas is",
"is \"\" quoting is csv.QUOTE_MINIMAL escapechar is \\ na_values are the empty string,",
"== 'verbose': cls.verbose = kwargs[k] elif k == 'print_fn': cls.print_fn = kwargs[k] elif",
"file. string is the actual string. ref_csv is the name of the reference",
"to set the per-kind data locations for an individual instance of the class.",
"filesystem location for reference files of a particular kind. This sets the location",
"actual_path is a path for a text file. ref_path is the name of",
"# so that all of its methods can be made available as top-level",
"pass in these locations though its __init__ method when constructing an instance of",
"check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertCSVFilesCorrect(self, actual_paths,",
"Check that an in-memory Pandas dataset matches a reference one from a saved",
"msgs) = r self.check_failures(failures, msgs) def assertFilesCorrect(self, actual_paths, ref_paths, kind=None, lstrip=False, rstrip=False, ignore_substrings=None,",
"messages. kind Reference kind, used to locate the reference csv file. check_data Option",
"carried out. Note: the stripping on a per-line basis. ignore_substrings is an optional",
"returns, as appropriate. \"\"\" for k in kwargs: if k == 'verbose': cls.verbose",
"%s' % reference_path) def check_failures(self, failures, msgs): \"\"\" Internal method for check for",
"reference one. actual_path Actual csv file. ref_csv Name of reference csv file. The",
"= self.files.check_file(actual_path, expected_path, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r",
"reference files. lstrip if set to true, both strings are left stripped before",
"ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that an in-memory string matches the contents",
"file matches a reference one. actual_path Actual csv file. ref_csv Name of reference",
"its location defined explicitly, then the default location is used. This is the",
"import FilesComparison # DEFAULT_FAIL_DIR is the default location for writing failing output #",
"The functionality is also available through the pytest framework, via the referencepytest module.",
"csv_read_fn function. The check_* comparison flags can be of any of the following:",
"are right stripped before the comparison is carried out. Note: the stripping on",
"run to run, but which do not indicate a problem. - There is",
"directly as part of a pytest suite. \"\"\" # Verbose flag verbose =",
"giving path for file where actual dataframe originated, used for error messages. kind",
"class ReferenceTest(object): \"\"\" Class for comparing results against saved \"known to be correct\"",
"reference file is determined by the configuration via set_data_location(). kind is the reference",
"which do not indicate a problem. - There is support for re-writing the",
"the output from run to run, but which do not indicate a problem.",
"an in-memory string matches the contents from a reference text file. string is",
"the framework needs to print messages. By default, it will use this print",
"to the set_default_data_location class-method. If you haven't even defined the None default, and",
"from matching collection of reference text files. actual_paths is a list of paths",
"taking a dataframe as its single parameter, and returning a list of field",
"\"\"\" Check that a csv file matches a reference one. actual_path Actual csv",
"self.check_failures(failures, msgs) def assertFilesCorrect(self, actual_paths, ref_paths, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0):",
"These defaults will apply to all instances of ReferenceTest subsequently created. The following",
"ignoring lines within the strings/files that contain particular patterns or regular expressions. This",
"self.assert_fn(failures == 0, '\\n'.join(msgs)) @staticmethod def default_print_fn(*args, **kwargs): \"\"\" Sometimes the framework needs",
"ReferenceTest instance. assert_fn Function to be used to make assertions for unit-tests. It",
"self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_result(string, expected_path) else: ilc = ignore_substrings ip = ignore_patterns",
"class can masquerade as a module, # so that all of its methods",
"actual_path Actual csv file. ref_csv Name of reference csv file. The location of",
"to use to read a csv file to obtain a pandas dataframe. If",
"to both the actual and expected. max_permutation_cases is an optional number specifying the",
"import division from __future__ import unicode_literals import os import sys import tempfile from",
"max_permutation_cases=0): \"\"\" Check that a file matches the contents from a reference text",
"False (to skip that kind of comparison completely) - a list of field",
"against saved \"known to be correct\" reference results. This is typically useful when",
"raise Exception('No reference data location for \"%s\"' % kind) return path def resolve_reference_paths(self,",
"fin.read() self.write_reference_result(actual, reference_path) def write_reference_files(self, actual_paths, reference_paths): \"\"\" Internal method for regenerating reference",
"msgs) = r self.check_failures(failures, msgs) def assertCSVFilesCorrect(self, actual_paths, ref_csvs, kind='csv', csv_read_fn=None, check_data=None, check_types=None,",
"to specify fields to sort by before comparing. condition Filter to be applied",
"self.write_reference_file(actual_path, expected_path) else: r = self.pandas.check_csv_file(actual_path, expected_path, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures,",
"two are considered to be identical. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind):",
"**kwargs): \"\"\" Check that a csv file matches a reference one. actual_paths List",
"to the whole line. preprocess is an optional function that takes a list",
"from ReferenceTest. The functionality is also available through the pytest framework, via the",
"Function to be used to make assertions for unit-tests. It should take two",
"see the contents of the files after preprocessing has taken place, so preprocessed",
"than comparing. All of the regenerate flags are set to False by default.",
"csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that an in-memory",
"testing for test-driven data analysis. Source repository: http://github.com/tdda/tdda License: MIT Copyright (c) Stochastic",
"the comparison between a string and a file fails, the actual string is",
"2016 \"\"\" from __future__ import absolute_import from __future__ import print_function from __future__ import",
"True by default. print_fn Sets the print function globally, to specify the function",
"individual instance of the class. If calls to assertFileCorrect() (etc) are made for",
"of permutations allowed; if the actual and expected lists differ only in that",
"apply that kind of comparison to all fields) - False (to skip that",
"pathnames are included in the failure messages. If not explicitly set by set_defaults(),",
"with the actual output. This, obviously, should be used only after careful checking",
"patterns or regular expressions. This is typically useful for filtering out things like",
"(to specify which rows should be compared). precision Number of decimal places to",
"single parameter, and returning a list of field names to use. The default",
"of the reference files is determined by the configuration via set_data_location(). kind is",
"by the configuration via set_data_location(). actual_path Optional parameter, giving path for file where",
"kind, used to locate the reference csv file. csv_read_fn A function to use",
"its __init__ method when constructing an instance of ReferenceTestCase as a superclass. If",
"self.files.check_files(actual_paths, expected_paths, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures,",
"main features are: - If the comparison between a string and a file",
"should be # regenerated when the tests are run. This should be set",
"failing output # if assertStringCorrect or assertFileCorrect fail with 'preprocessing' # in place.",
"expected. max_permutation_cases is an optional number specifying the maximum number of permutations allowed;",
"if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: mpc = max_permutation_cases r = self.files.check_file(actual_path, expected_path, lstrip=lstrip,",
"places to compare float values. **kwargs Any additional named parameters are passed straight",
"check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) def assertCSVFileCorrect(self, actual_path, ref_csv, kind='csv', csv_read_fn=None, check_data=None, check_types=None,",
"relative path. \"\"\" if self.reference_data_locations and not os.path.isabs(path): if kind not in self.reference_data_locations:",
"# Default print function print_fn = default_print_fn # Magic so that an instance",
"make calls to assertFileCorrect() (etc) using relative pathnames for the reference data files,",
"single parameter, and returning a list of field names to use. Raises NotImplementedError",
"default parameters, at the class level. These defaults will apply to all instances",
"to assertFileCorrect() (etc) using relative pathnames for the reference data files, then it",
"There is support for ignoring lines within the strings/files that contain particular patterns",
"particular patterns or regular expressions. This is typically useful for filtering out things",
"use assertCSVFileCorrect instead. \"\"\" expected_path = self.resolve_reference_path(ref_path, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else:",
"line. preprocess is an optional function that takes a list of strings and",
"\"\"\" expected_paths = self.resolve_reference_paths(ref_paths, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: mpc = max_permutation_cases",
"that an in-memory Pandas dataset matches a reference one from a saved reference",
"wrong, or because the intended behaviour has changed. The functionality provided by this",
"to read a csv file to obtain a pandas dataframe. If None, then",
"permutations of each other, and the number of such permutations does not exceed",
"contain particular patterns or regular expressions. This is typically useful for filtering out",
"outfile = kwargs.get('file', sys.stdout) outfile.flush() # Default print function print_fn = default_print_fn #",
"declared for kind=None, which *must* be specified. This method overrides any global defaults",
"kind of file should be regenerated. \"\"\" if kind not in self.regenerate: kind",
"a list of field names to use. The default csv loader function is",
"where temporary files are written. Temporary files are created whenever a text file",
"Number of decimal places to compare float values. **kwargs Any additional named parameters",
"from run to run, but which do not indicate a problem. - There",
"calls to the set_default_data_location class-method. If you haven't even defined the None default,",
"all fields) - False (to skip that kind of comparison completely) - a",
"ReferenceTest(object): \"\"\" Class for comparing results against saved \"known to be correct\" reference",
"the default location for writing failing output # if assertStringCorrect or assertFileCorrect fail",
"standard unittest framework, via the referencetestcase module. This provides the ReferenceTestCase class, which",
"If you haven't even defined the None default, and you make calls to",
"the actual dataset to use to check that there are no unexpected extra",
"environment variable TDDA_FAIL_DIR is used, or, if that is not defined, it defaults",
"self.regenerate: kind = None return kind in self.regenerate and self.regenerate[kind] def write_reference_file(self, actual_path,",
"is carried out. Note: the stripping on a per-line basis. rstrip if set",
"'preprocess' function has been specified. It's useful to be able to see the",
"for reference files of a particular kind. Typically you would subclass ReferenceTestCase and",
"ref_csvs List of names of matching reference csv file. The location of the",
"fout.write(result) if self.verbose and self.print_fn: self.print_fn('Written %s' % reference_path) def check_failures(self, failures, msgs):",
"but which do not indicate a problem. - There is support for re-writing",
"returns a vector of booleans (to specify which rows should be compared). precision",
"created. The instance method set_data_location() can be used to set the per-kind data",
"in that their lines are permutations of each other, and the number of",
"passing in a print_fn parameter to __init__. \"\"\" print(*args, **kwargs) outfile = kwargs.get('file',",
"compare typees. check_order Option to specify fields to compare field order. check_extra_cols Option",
"vector of booleans (to specify which rows should be compared). precision Number of",
"assertions for unit-tests. It should take two parameters: - a value (which should",
"a wrapper around pandas pd.read_csv(), with default options as follows: index_col is None",
"all of the same kind. \"\"\" return [self.resolve_reference_path(p, kind=kind) for p in paths]",
"comparing. It can be None, or can be a function that takes a",
"file and a diff command is suggested for seeing the differences between the",
"the standard pandas pd.read_csv() function. check_data Option to specify fields to compare values.",
"By default, it will use this print function, but you can override it",
"to True, then the framework will regenerate reference data of that kind, rather",
"ReferenceTest, as functions that can be called directly as part of a pytest",
"the configuration via set_data_location(). kind is the reference kind, used to locate the",
"set to true, both strings are right stripped before the comparison is carried",
"re-writing the reference output with the actual output. This, obviously, should be used",
"error messages. kind Reference kind, used to locate the reference csv file. check_data",
"can masquerade as a module, # so that all of its methods can",
"be applied to both the actual and expected. max_permutation_cases is an optional number",
"fail with 'preprocessing' # in place. This can be overridden using the set_defaults()",
"kind Reference kind, used to locate the reference csv file. check_data Option to",
"outfile.flush() # Default print function print_fn = default_print_fn # Magic so that an",
"is determined by the configuration via set_data_location(). kind Reference kind, used to locate",
"rather than comparing. All of the regenerate flags are set to False by",
"be considered to be the same if they only differ in substrings that",
"should only include explicit anchors if they need refer to the whole line.",
"and their pathnames are included in the failure messages. If not explicitly set",
"Reference kind, used to locate the reference csv file. check_data Option to specify",
"by set_default_data_location(). default_data_locations = {} @classmethod def set_defaults(cls, **kwargs): \"\"\" Set default parameters,",
"while running tests. The function should have the same signature as python's __future__",
"is the name of the reference csv file. The location of the reference",
"matches the contents from a reference text file. actual_path is a path for",
"condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertStringCorrect(self, string, ref_csv,",
"= r self.check_failures(failures, msgs) def assertStringCorrect(self, string, ref_csv, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None,",
"check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None): \"\"\" Check that an in-memory Pandas dataframe",
"NULL keep_default_na is False Raises NotImplementedError if Pandas is not available. \"\"\" expected_paths",
"sortby=sortby, precision=precision) def assertCSVFileCorrect(self, actual_path, ref_csv, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None,",
"kinds of reference files should be # regenerated when the tests are run.",
"files, all of the same kind. \"\"\" return [self.resolve_reference_path(p, kind=kind) for p in",
"to be identical. This should be used for unstructured data such as logfiles,",
"reference data location for \"%s\"' % kind) return path def resolve_reference_paths(self, paths, kind=None):",
"self.reference_data_locations: kind = None if kind in self.reference_data_locations: path = os.path.join(self.reference_data_locations[kind], path) else:",
"string is written to a file and a diff command is suggested for",
"self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: r = self.pandas.check_csv_files(actual_paths, expected_paths, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision)",
"called directly as part of a pytest suite. \"\"\" # Verbose flag verbose",
"set: verbose Sets the boolean verbose flag globally, to control reporting of errors",
"the expected output. - There is support for ignoring lines within the strings/files",
"self.default_data_locations[kind] = location def __init__(self, assert_fn): \"\"\" Initializer for a ReferenceTest instance. assert_fn",
"Raises NotImplementedError if Pandas is not available. \"\"\" r = self.pandas.check_dataframe(df, ref_df, actual_path=actual_path,",
"expected output. - There is support for ignoring lines within the strings/files that",
"condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that a csv file matches a reference",
"specify fields in the actual dataset to use to check that there are",
"both strings are right stripped before the comparison is carried out. Note: the",
"This, obviously, should be used only after careful checking that the new output",
"the class. If calls to assertFileCorrect() (etc) are made for kinds of reference",
"method overrides any global defaults set from calls to the set_default_data_location class-method. If",
"correct\" reference results. This is typically useful when software produces either a (text",
"to control reporting of errors while running tests. Reference tests tend to take",
"TDDA_FAIL_DIR is used, or, if that is not defined, it defaults to /tmp,",
"framework, via the referencetestcase module. This provides the ReferenceTestCase class, which is a",
"kind is the reference kind, used to locate the reference csv file. lstrip",
"max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def resolve_reference_path(self, path, kind=None): \"\"\" Internal",
"FilesComparison(print_fn=self.print_fn, verbose=self.verbose, tmp_dir=self.tmp_dir) def set_data_location(self, location, kind=None): \"\"\" Declare the filesystem location for",
"of reference text files. actual_paths is a list of paths for text files.",
"fields to compare field order. check_extra_cols Option to specify fields in the actual",
"path for file where actual dataframe originated, used for error messages. kind Reference",
"assertFileCorrect(self, actual_path, ref_path, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that",
"resolve_reference_paths(self, paths, kind=None): \"\"\" Internal method for resolving a list of reference data",
"The check_* comparison flags can be of any of the following: - None",
"fin: actual = fin.read() self.write_reference_result(actual, reference_path) def write_reference_files(self, actual_paths, reference_paths): \"\"\" Internal method",
"location of the reference files is determined by the configuration via set_data_location(). kind",
"self.write_reference_result(string, expected_path) else: ilc = ignore_substrings ip = ignore_patterns mpc = max_permutation_cases r",
"data of that kind, rather than comparing. All of the regenerate flags are",
"no unexpected extra columns. sortby Option to specify fields to sort by before",
"\"\"\" Internal method for deciding where a reference data file should be looked",
"preprocess=None, max_permutation_cases=0): \"\"\" Check that a file matches the contents from a reference",
"reference data, for # each kind. Can be initialized by set_default_data_location(). default_data_locations =",
"(c) Stochastic Solutions Limited 2016 \"\"\" from __future__ import absolute_import from __future__ import",
"ref_df Expected dataframe. actual_path Optional parameter, giving path for file where actual dataframe",
"pandas dataframe. If None, then a default csv loader is used. The check_*",
"expected_path = self.resolve_reference_path(ref_path, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: mpc = max_permutation_cases r",
"Default print function print_fn = default_print_fn # Magic so that an instance of",
"from a reference text file. string is the actual string. ref_csv is the",
"is used, which takes the same parameters as the standard pandas pd.read_csv() function.",
"that a csv file matches a reference one. actual_paths List of Actual csv",
"Number of decimal places to compare float values. loader Function to use to",
"= FilesComparison(print_fn=self.print_fn, verbose=self.verbose, tmp_dir=self.tmp_dir) def set_data_location(self, location, kind=None): \"\"\" Declare the filesystem location",
"reference files should be # regenerated when the tests are run. This should",
"value (which should evaluate as true for the test to pass) - a",
"pytest framework, via the referencepytest module. This module provides all of the methods",
"a file and a diff command is suggested for seeing the differences between",
"[self.resolve_reference_path(p, kind=kind) for p in paths] def should_regenerate(self, kind): \"\"\" Internal method to",
"if assertStringCorrect or assertFileCorrect fail with 'preprocessing' # in place. This can be",
"exception. \"\"\" self.default_data_locations[kind] = location def __init__(self, assert_fn): \"\"\" Initializer for a ReferenceTest",
"class method. DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir()) class ReferenceTest(object): \"\"\" Class for comparing results",
"flag is set to True, then the framework will regenerate reference data of",
"of the reference file is determined by the configuration via set_data_location(). actual_path Optional",
"kind. Can be initialized by set_default_data_location(). default_data_locations = {} @classmethod def set_defaults(cls, **kwargs):",
"file is determined by the configuration via set_data_location(). kind Reference kind, used to",
"via set_data_location(). kind Reference kind, used to locate the reference csv files. csv_read_fn",
"takes a list of strings and preprocesses it in some way; this function",
"to the csv_read_fn function. The check_* comparison flags can be of any of",
"not explicitly set by set_defaults(), the environment variable TDDA_FAIL_DIR is used, or, if",
"A function to use to read a csv file to obtain a pandas",
"is correct, either because the previous output was in fact wrong, or because",
"tmp_dir property globally, to specify the directory where temporary files are written. Temporary",
"reference data files, all of the same kind. \"\"\" return [self.resolve_reference_path(p, kind=kind) for",
"the regeneration flag for a particular kind of reference file, globally, for all",
"actual_path=None, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ilc, ignore_patterns=ip, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs)",
"for the test to pass) - a string (to report details of how",
"For csv files, use assertCSVFileCorrect instead. \"\"\" expected_path = self.resolve_reference_path(ref_path, kind=kind) if self.should_regenerate(kind):",
"is typically useful when software produces either a (text or csv) file or",
"or whatever tempfile.gettempdir() returns, as appropriate. \"\"\" for k in kwargs: if k",
"and NULL keep_default_na is False Raises NotImplementedError if Pandas is not available. \"\"\"",
"used. This is the location declared for kind=None, which *must* be specified. This",
"quoting is csv.QUOTE_MINIMAL escapechar is \\ na_values are the empty string, NaN, and",
"of matching reference csv file. The location of the reference files is determined",
"ref_df = self.pandas.load_csv(expected_path, loader=csv_read_fn) self.assertDatasetsEqual(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby,",
"values. check_types Option to specify fields to compare typees. check_order Option to specify",
"must not contain parenthesised groups, and should only include explicit anchors if they",
"= max_permutation_cases r = self.files.check_file(actual_path, expected_path, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures,",
"be # regenerated when the tests are run. This should be set using",
"The following parameters can be set: verbose Sets the boolean verbose flag globally,",
"self.write_reference_files(actual_paths, expected_paths) else: r = self.pandas.check_csv_files(actual_paths, expected_paths, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures,",
"default. \"\"\" cls.regenerate[kind] = regenerate @classmethod def set_default_data_location(self, location, kind=None): \"\"\" Declare the",
"r self.check_failures(failures, msgs) def assertFileCorrect(self, actual_path, ref_path, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None,",
"variable TDDA_FAIL_DIR is used, or, if that is not defined, it defaults to",
"then a default csv loader is used, which takes the same parameters as",
"{} # Dictionary describing default location for reference data, for # each kind.",
"its single parameter, and returning a list of field names to use. Raises",
"- a value (which should evaluate as true for the test to pass)",
"self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: r = self.pandas.check_csv_file(actual_path, expected_path, check_types=check_types, check_order=check_order,",
"has changed. The functionality provided by this class is available through python's standard",
"this limit, then the two are considered to be identical. \"\"\" expected_path =",
"one. df Actual dataframe. ref_df Expected dataframe. actual_path Optional parameter, giving path for",
"to be identical. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_result(string, expected_path) else:",
"method for regenerating reference data. \"\"\" with open(actual_path) as fin: actual = fin.read()",
"print function print_fn = default_print_fn # Magic so that an instance of this",
"quotechar is \"\" quoting is csv.QUOTE_MINIMAL escapechar is \\ na_values are the empty",
"for, if it has been specified using a relative path. \"\"\" if self.reference_data_locations",
"for error messages. check_data Option to specify fields to compare values. check_types Option",
"straight through to the csv_read_fn function. The check_* comparison flags can be of",
"assert_fn Function to be used to make assertions for unit-tests. It should take",
"file. actual_path is a path for a text file. ref_path is the name",
"os.path.join(self.reference_data_locations[kind], path) else: raise Exception('No reference data location for \"%s\"' % kind) return",
"to make assertions for unit-tests. It should take two parameters: - a value",
"msgs) def resolve_reference_path(self, path, kind=None): \"\"\" Internal method for deciding where a reference",
"it will use this print function, but you can override it by passing",
"a list of field names - a function taking a dataframe as its",
"a per-line basis. rstrip if set to true, both strings are right stripped",
"__future__ print function. If not specified, a default print function is used which",
"ReferenceTest subsequently created. The following parameters can be set: verbose Sets the boolean",
"csv files. csv_read_fn A function to use to read a csv file to",
"ref_csv is the name of the reference csv file. The location of the",
"is a list of paths for text files. ref_paths is a list of",
"a collection of files matche the contents from matching collection of reference text",
"takes a dataframe as its single parameter and returns a vector of booleans",
"can't check correctness, so it will raise an exception. \"\"\" self.default_data_locations[kind] = location",
"\"%s\"' % kind) return path def resolve_reference_paths(self, paths, kind=None): \"\"\" Internal method for",
"__init__(self, assert_fn): \"\"\" Initializer for a ReferenceTest instance. assert_fn Function to be used",
"can be of any of the following: - None (to apply that kind",
"that kind, rather than comparing. All of the regenerate flags are set to",
"make assertions for unit-tests. It should take two parameters: - a value (which",
"such as logfiles, etc. For csv files, use assertCSVFileCorrect instead. \"\"\" expected_path =",
"is written to a file and a diff command is suggested for seeing",
"value does not evaluate as true). \"\"\" self.assert_fn = assert_fn self.reference_data_locations = dict(self.default_data_locations)",
"globally, to specify the function to use to display information while running tests.",
"string and a file fails, the actual string is written to a file",
"the comparison is carried out. Note: the stripping on a per-line basis. rstrip",
"assertFileCorrect() (etc) using relative pathnames for the reference data files, then it can't",
"the tests are run. This should be set using the # set_regeneration() class-method.",
"= kwargs[k] else: raise Exception('set_defaults: Unrecogized option %s' % k) @classmethod def set_regeneration(cls,",
"order. check_extra_cols Option to specify fields in the actual dataset to use to",
"methods can be made available as top-level functions, # to work will with",
"function. The check_* comparison flags can be of any of the following: -",
"This is typically useful for filtering out things like version numbers and timestamps",
"with 'preprocessing' # in place. This can be overridden using the set_defaults() class",
"that kind of comparison to all fields) - False (to skip that kind",
"This is the location declared for kind=None, which *must* be specified. If you",
"kind of comparison to all fields) - False (to skip that kind of",
"class-method. If you haven't even defined the None default, and you make calls",
"kind=None): \"\"\" Internal method for deciding where a reference data file should be",
"expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: ref_df = self.pandas.load_csv(expected_path, loader=csv_read_fn)",
"a particular kind of file should be regenerated. \"\"\" if kind not in",
"reference csv file. df Actual dataframe. ref_csv Name of reference csv file. The",
"coding: utf-8 -*- \"\"\" referencetest.py: refererence testing for test-driven data analysis. Source repository:",
"the reference kind, used to locate the reference file. lstrip if set to",
"this function will be applied to both the actual and expected. max_permutation_cases is",
"the name of the reference file. The location of the reference file is",
"\"\"\" Declare the default filesystem location for reference files of a particular kind.",
"that an in-memory Pandas dataframe matches an in-memory reference one. df Actual dataframe.",
"to be the same if they only differ in substrings that match one",
"expected_path) in zip(actual_paths, reference_paths): self.write_reference_file(actual_path, reference_path) def write_reference_result(self, result, reference_path): \"\"\" Internal method",
"cls.print_fn = kwargs[k] elif k == 'tmp_dir': cls.tmp_dir = kwargs[k] else: raise Exception('set_defaults:",
"\"\"\" self.assert_fn = assert_fn self.reference_data_locations = dict(self.default_data_locations) self.pandas = PandasComparison(print_fn=self.print_fn, verbose=self.verbose) self.files =",
"= os.path.join(self.reference_data_locations[kind], path) else: raise Exception('No reference data location for \"%s\"' % kind)",
"assertFileCorrect fail with 'preprocessing' # in place. This can be overridden using the",
"def assertCSVFileCorrect(self, actual_path, ref_csv, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs):",
"csv file. ref_csv Name of reference csv file. The location of the reference",
"any of the following: - None (to apply that kind of comparison to",
"is set to True by default. print_fn Sets the print function globally, to",
"a diff command is suggested for seeing the differences between the actual output",
"of paths for text files. ref_paths is a list of names of the",
"match one of these regular expressions. The expressions must not contain parenthesised groups,",
"tdda.referencetest.checkfiles import FilesComparison # DEFAULT_FAIL_DIR is the default location for writing failing output",
"reference one. df Actual dataframe. ref_df Expected dataframe. actual_path Optional parameter, giving path",
"a reference text file. actual_path is a path for a text file. ref_path",
"verbose = True # Temporary directory tmp_dir = DEFAULT_FAIL_DIR # Dictionary describing which",
"unstructured data such as logfiles, etc. For csv files, use assertCSVFileCorrect instead. \"\"\"",
"ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) def assertCSVFileCorrect(self, actual_path, ref_csv,",
"results. This is typically useful when software produces either a (text or csv)",
"set_default_data_location(self, location, kind=None): \"\"\" Declare the default filesystem location for reference files of",
"Dictionary describing which kinds of reference files should be # regenerated when the",
"some way; this function will be applied to both the actual and expected.",
"are written. Temporary files are created whenever a text file check fails and",
"is the reference kind, used to locate the reference files. lstrip if set",
"the contents from a reference text file. actual_path is a path for a",
"are written to this directory, and their pathnames are included in the failure",
"as functions that can be called directly as part of a pytest suite.",
"used for unstructured data such as logfiles, etc. For csv files, use assertCSVFileCorrect",
"a (text or csv) file or a string as output. The main features",
"a string as output. The main features are: - If the comparison between",
"kind = None if kind in self.reference_data_locations: path = os.path.join(self.reference_data_locations[kind], path) else: raise",
"matches a reference one. actual_paths List of Actual csv files. ref_csvs List of",
"default. print_fn Sets the print function globally, to specify the function to use",
"anchors if they need refer to the whole line. preprocess is an optional",
"(to report details of how a test failed, if the value does not",
"expected_path) else: ilc = ignore_substrings ip = ignore_patterns mpc = max_permutation_cases r =",
"self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: ref_df = self.pandas.load_csv(expected_path, loader=csv_read_fn) self.assertDatasetsEqual(df, ref_df,",
"\"\"\" Check that an in-memory string matches the contents from a reference text",
"sortby=None, precision=None): \"\"\" Check that an in-memory Pandas dataframe matches an in-memory reference",
"data files, all of the same kind. \"\"\" return [self.resolve_reference_path(p, kind=kind) for p",
"\"\"\" referencetest.py: refererence testing for test-driven data analysis. Source repository: http://github.com/tdda/tdda License: MIT",
"files after preprocessing has taken place, so preprocessed versions of the files are",
"the reference files. lstrip if set to true, both strings are left stripped",
"the two are considered to be identical. This should be used for unstructured",
"carried out. Note: the stripping on a per-line basis. rstrip if set to",
"stripping on a per-line basis. rstrip if set to true, both strings are",
"actual output and the expected output. - There is support for ignoring lines",
"of reference data files, all of the same kind. \"\"\" return [self.resolve_reference_path(p, kind=kind)",
"is the location declared for kind=None, which *must* be specified. This method overrides",
"decimal places to compare float values. The check_* comparison flags can be of",
"to use to check that there are no unexpected extra columns. sortby Option",
"fields in the actual dataset to use to check that there are no",
"is an optional function that takes a list of strings and preprocesses it",
"instances of the ReferenceTest class subsequently created. The instance method set_data_location() can be",
"provides all of the methods from ReferenceTest, as functions that can be called",
"lists differ only in that their lines are permutations of each other, and",
"- a string (to report details of how a test failed, if the",
"else: r = self.pandas.check_csv_files(actual_paths, expected_paths, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) =",
"are made for kinds of reference data that hasn't had its location defined",
"Pandas is not available. \"\"\" expected_paths = self.resolve_reference_paths(ref_csvs, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths)",
"if it has been specified using a relative path. \"\"\" if self.reference_data_locations and",
"actual_paths, ref_csvs, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check",
"version numbers and timestamps that vary in the output from run to run,",
"has taken place, so preprocessed versions of the files are written to this",
"the name of the reference csv file. The location of the reference file",
"will affect all instances of the ReferenceTest class subsequently created. The instance method",
"= PandasComparison(print_fn=self.print_fn, verbose=self.verbose) self.files = FilesComparison(print_fn=self.print_fn, verbose=self.verbose, tmp_dir=self.tmp_dir) def set_data_location(self, location, kind=None): \"\"\"",
"to specify the function to use to display information while running tests. The",
"if Pandas is not available. \"\"\" r = self.pandas.check_dataframe(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data,",
"default location is used. This is the location declared for kind=None, which *must*",
"fields) - False (to skip that kind of comparison completely) - a list",
"set_data_location(self, location, kind=None): \"\"\" Declare the filesystem location for reference files of a",
"msgs) def assertDatasetCorrect(self, df, ref_csv, actual_path=None, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None,",
"\\ na_values are the empty string, NaN, and NULL keep_default_na is False Raises",
"expressions. This is typically useful for filtering out things like version numbers and",
"parameters, at the class level. These defaults will apply to all instances of",
"for filtering out things like version numbers and timestamps that vary in the",
"%s' % k) @classmethod def set_regeneration(cls, kind=None, regenerate=True): \"\"\" Set the regeneration flag",
"failing tests as they happen, rather than waiting for the full report at",
"else: ilc = ignore_substrings ip = ignore_patterns mpc = max_permutation_cases r = self.files.check_string_against_file(string,",
"via the -w option. regenerate = {} # Dictionary describing default location for",
"globally, for all instances of the ReferenceTest class. If the regenerate flag is",
"if kind in self.reference_data_locations: path = os.path.join(self.reference_data_locations[kind], path) else: raise Exception('No reference data",
"to true, both strings are right stripped before the comparison is carried out.",
"csv file matches a reference one. actual_path Actual csv file. ref_csv Name of",
"check for failures and reporting them. \"\"\" self.assert_fn(failures == 0, '\\n'.join(msgs)) @staticmethod def",
"messages. By default, it will use this print function, but you can override",
"the contents of the files after preprocessing has taken place, so preprocessed versions",
"behaviour has changed. The functionality provided by this class is available through python's",
"preprocess=None, max_permutation_cases=0): \"\"\" Check that a collection of files matche the contents from",
"to True by default. print_fn Sets the print function globally, to specify the",
"suggested for seeing the differences between the actual output and the expected output.",
"and pass in these locations though its __init__ method when constructing an instance",
"def assertDatasetCorrect(self, df, ref_csv, actual_path=None, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None,",
"that takes a list of strings and preprocesses it in some way; this",
"the files after preprocessing has taken place, so preprocessed versions of the files",
"through the pytest framework, via the referencepytest module. This module provides all of",
"of reference csv file. The location of the reference file is determined by",
"csv loader function is a wrapper around pandas pd.read_csv(), with default options as",
"set_data_location(). kind is the reference kind, used to locate the reference files. lstrip",
"expected_path Optional parameter, giving path for file where expected dataframe originated, used for",
"considered to be identical. This should be used for unstructured data such as",
"as fin: actual = fin.read() self.write_reference_result(actual, reference_path) def write_reference_files(self, actual_paths, reference_paths): \"\"\" Internal",
"to locate the reference files. lstrip if set to true, both strings are",
"as follows: index_col is None infer_datetime_format is True quotechar is \"\" quoting is",
"configuration via set_data_location(). kind Reference kind, used to locate the reference csv file.",
"= r self.check_failures(failures, msgs) def assertFilesCorrect(self, actual_paths, ref_paths, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None,",
"the following: - None (to apply that kind of comparison to all fields)",
"should have the same signature as python's __future__ print function. If not specified,",
"that an instance of this class can masquerade as a module, # so",
"the intended behaviour has changed. The functionality provided by this class is available",
"of the reference files is determined by the configuration via set_data_location(). kind Reference",
"<filename>inst/tdda/referencetest/referencetest.py # -*- coding: utf-8 -*- \"\"\" referencetest.py: refererence testing for test-driven data",
"None return kind in self.regenerate and self.regenerate[kind] def write_reference_file(self, actual_path, reference_path): \"\"\" Internal",
"function to use to display information while running tests. The function should have",
"self.resolve_reference_paths(ref_paths, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: mpc = max_permutation_cases r = self.files.check_files(actual_paths,",
"import os import sys import tempfile from tdda.referencetest.checkpandas import PandasComparison from tdda.referencetest.checkfiles import",
"deciding where a reference data file should be looked for, if it has",
"def __init__(self, assert_fn): \"\"\" Initializer for a ReferenceTest instance. assert_fn Function to be",
"reference csv file. The location of the reference file is determined by the",
"be called directly as part of a pytest suite. \"\"\" # Verbose flag",
"test to pass) - a string (to report details of how a test",
"This is typically useful when software produces either a (text or csv) file",
"License: MIT Copyright (c) Stochastic Solutions Limited 2016 \"\"\" from __future__ import absolute_import",
"an in-memory Pandas dataframe matches an in-memory reference one. df Actual dataframe. ref_df",
"kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a file matches",
"Internal method to determine if a particular kind of file should be regenerated.",
"and returns a vector of booleans (to specify which rows should be compared).",
"if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: r = self.pandas.check_csv_files(actual_paths, expected_paths, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby,",
"the two are considered to be identical. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if",
"the contents from matching collection of reference text files. actual_paths is a list",
"to check that there are no unexpected extra columns. sortby Option to specify",
"of a pytest suite. \"\"\" # Verbose flag verbose = True # Temporary",
"location of the reference file is determined by the configuration via set_data_location(). actual_path",
"is csv.QUOTE_MINIMAL escapechar is \\ na_values are the empty string, NaN, and NULL",
"__future__ import division from __future__ import unicode_literals import os import sys import tempfile",
"contents from matching collection of reference text files. actual_paths is a list of",
"check_extra_cols Option to specify fields in the actual dataset to use to check",
"indicate a problem. - There is support for re-writing the reference output with",
"tmp_dir Sets the tmp_dir property globally, to specify the directory where temporary files",
"not defined, it defaults to /tmp, c:\\temp or whatever tempfile.gettempdir() returns, as appropriate.",
"fout: fout.write(result) if self.verbose and self.print_fn: self.print_fn('Written %s' % reference_path) def check_failures(self, failures,",
"the set_defaults() class method. DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir()) class ReferenceTest(object): \"\"\" Class for",
"not available. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: r",
"dataframe originated, used for error messages. kind Reference kind, used to locate the",
"self.check_failures(failures, msgs) def resolve_reference_path(self, path, kind=None): \"\"\" Internal method for deciding where a",
"are no unexpected extra columns. sortby Option to specify fields to sort by",
"import sys import tempfile from tdda.referencetest.checkpandas import PandasComparison from tdda.referencetest.checkfiles import FilesComparison #",
"lines are permutations of each other, and the number of such permutations does",
"matches a reference one from a saved reference csv file. df Actual dataframe.",
"parameter, giving path for file where actual dataframe originated, used for error messages.",
"its methods can be made available as top-level functions, # to work will",
"@staticmethod def default_print_fn(*args, **kwargs): \"\"\" Sometimes the framework needs to print messages. By",
"all of the methods from ReferenceTest. The functionality is also available through the",
"csv file. The location of the reference file is determined by the configuration",
"a pandas dataframe. If None, then a default csv loader is used. The",
"permutations allowed; if the actual and expected lists differ only in that their",
"of substrings; lines containing any of these substrings will be ignored in the",
"particular kind of reference file, globally, for all instances of the ReferenceTest class.",
"verbose=self.verbose) self.files = FilesComparison(print_fn=self.print_fn, verbose=self.verbose, tmp_dir=self.tmp_dir) def set_data_location(self, location, kind=None): \"\"\" Declare the",
"keep_default_na is False Raises NotImplementedError if Pandas is not available. \"\"\" expected_path =",
"loader function is a wrapper around pandas pd.read_csv(), with default options as follows:",
"float values. loader Function to use to read a csv file to obtain",
"describing which kinds of reference files should be # regenerated when the tests",
"failures, msgs): \"\"\" Internal method for check for failures and reporting them. \"\"\"",
"= assert_fn self.reference_data_locations = dict(self.default_data_locations) self.pandas = PandasComparison(print_fn=self.print_fn, verbose=self.verbose) self.files = FilesComparison(print_fn=self.print_fn, verbose=self.verbose,",
"by set_defaults(), the environment variable TDDA_FAIL_DIR is used, or, if that is not",
"a pandas dataframe. If None, then a default csv loader is used, which",
"The main features are: - If the comparison between a string and a",
"= regenerate @classmethod def set_default_data_location(self, location, kind=None): \"\"\" Declare the default filesystem location",
"careful checking that the new output is correct, either because the previous output",
"assertDatasetsEqual(self, df, ref_df, actual_path=None, expected_path=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None): \"\"\" Check",
"locate the reference csv file. lstrip if set to true, both strings are",
"Option to specify fields to sort by before comparing. condition Filter to be",
"if Pandas is not available. \"\"\" expected_paths = self.resolve_reference_paths(ref_csvs, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths,",
"a list of strings and preprocesses it in some way; this function will",
"errors while running tests. Reference tests tend to take longer to run than",
"def default_print_fn(*args, **kwargs): \"\"\" Sometimes the framework needs to print messages. By default,",
"fields to compare typees. check_order Option to specify fields to compare field order.",
"explicit anchors if they need refer to the whole line. preprocess is an",
"preprocess=None, max_permutation_cases=0): \"\"\" Check that an in-memory string matches the contents from a",
"it can't check correctness, so it will raise an exception. \"\"\" self.reference_data_locations[kind] =",
"the test to pass) - a string (to report details of how a",
"these substrings will be ignored in the comparison. ignore_patterns is an optional list",
"comparison is carried out. Note: the stripping on a per-line basis. ignore_substrings is",
"path def resolve_reference_paths(self, paths, kind=None): \"\"\" Internal method for resolving a list of",
"and a file fails, the actual string is written to a file and",
"= fin.read() self.write_reference_result(actual, reference_path) def write_reference_files(self, actual_paths, reference_paths): \"\"\" Internal method for regenerating",
"single parameter and returns a vector of booleans (to specify which rows should",
"# regenerated when the tests are run. This should be set using the",
"None, then a default csv loader is used. The check_* comparison flags can",
"kind=None, which *must* be specified. This method overrides any global defaults set from",
"that there are no unexpected extra columns. sortby Option to specify fields to",
"files. \"\"\" for (actual_path, expected_path) in zip(actual_paths, reference_paths): self.write_reference_file(actual_path, reference_path) def write_reference_result(self, result,",
"is carried out. Note: the stripping on a per-line basis. ignore_substrings is an",
"relative pathnames for the reference data files, then it can't check correctness, so",
"Actual dataframe. ref_csv Name of reference csv file. The location of the reference",
"escapechar is \\ na_values are the empty string, NaN, and NULL keep_default_na is",
"reference file. The location of the reference file is determined by the configuration",
"check_data Option to specify fields to compare values. check_types Option to specify fields",
"a csv file matches a reference one. actual_paths List of Actual csv files.",
"by this class is available through python's standard unittest framework, via the referencetestcase",
"reference data for a list of files. \"\"\" for (actual_path, expected_path) in zip(actual_paths,",
"be looked for, if it has been specified using a relative path. \"\"\"",
"an optional list of substrings; lines containing any of these substrings will be",
"class is available through python's standard unittest framework, via the referencetestcase module. This",
"the per-kind data locations for an individual instance of the class. If calls",
"this class can masquerade as a module, # so that all of its",
"specify the directory where temporary files are written. Temporary files are created whenever",
"messages. If not explicitly set by set_defaults(), the environment variable TDDA_FAIL_DIR is used,",
"that a file matches the contents from a reference text file. actual_path is",
"fails, the actual string is written to a file and a diff command",
"If not specified, a default print function is used which writes unbuffered to",
"property globally, to specify the directory where temporary files are written. Temporary files",
"the empty string, NaN, and NULL keep_default_na is False Raises NotImplementedError if Pandas",
"used only after careful checking that the new output is correct, either because",
"intended behaviour has changed. The functionality provided by this class is available through",
"import absolute_import from __future__ import print_function from __future__ import division from __future__ import",
"from __future__ import unicode_literals import os import sys import tempfile from tdda.referencetest.checkpandas import",
"you would subclass ReferenceTestCase and pass in these locations though its __init__ method",
"to locate the reference csv file. csv_read_fn A function to use to read",
"the reference file. lstrip if set to true, both strings are left stripped",
"per-line basis. rstrip if set to true, both strings are right stripped before",
"the reference data files, then it can't check correctness, so it will raise",
"reference kind, used to locate the reference file. lstrip if set to true,",
"one of these regular expressions. The expressions must not contain parenthesised groups, and",
"writes unbuffered to sys.stdout. tmp_dir Sets the tmp_dir property globally, to specify the",
"because the previous output was in fact wrong, or because the intended behaviour",
"file. ref_csv Name of reference csv file. The location of the reference file",
"\"\"\" if kind not in self.regenerate: kind = None return kind in self.regenerate",
"for file where expected dataframe originated, used for error messages. check_data Option to",
"of the ReferenceTest class. If the regenerate flag is set to True, then",
"filtering out things like version numbers and timestamps that vary in the output",
"are the empty string, NaN, and NULL keep_default_na is False Raises NotImplementedError if",
"= None return kind in self.regenerate and self.regenerate[kind] def write_reference_file(self, actual_path, reference_path): \"\"\"",
"self.files.check_file(actual_path, expected_path, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures,",
"This is the location declared for kind=None, which *must* be specified. This method",
"FilesComparison # DEFAULT_FAIL_DIR is the default location for writing failing output # if",
"else: raise Exception('set_defaults: Unrecogized option %s' % k) @classmethod def set_regeneration(cls, kind=None, regenerate=True):",
"files matche the contents from matching collection of reference text files. actual_paths is",
"kind = None return kind in self.regenerate and self.regenerate[kind] def write_reference_file(self, actual_path, reference_path):",
"= self.pandas.load_csv(expected_path, loader=csv_read_fn) self.assertDatasetsEqual(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision)",
"(actual_path, expected_path) in zip(actual_paths, reference_paths): self.write_reference_file(actual_path, reference_path) def write_reference_result(self, result, reference_path): \"\"\" Internal",
"for k in kwargs: if k == 'verbose': cls.verbose = kwargs[k] elif k",
"file. check_data Option to specify fields to compare values. check_types Option to specify",
"msgs) = r self.check_failures(failures, msgs) def resolve_reference_path(self, path, kind=None): \"\"\" Internal method for",
"ref_csv, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that",
"*must* be specified. This method overrides any global defaults set from calls to",
"self.write_reference_file(actual_path, expected_path) else: ref_df = self.pandas.load_csv(expected_path, loader=csv_read_fn) self.assertDatasetsEqual(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types,",
"(etc) are made for kinds of reference data that hasn't had its location",
"max_permutation_cases=0): \"\"\" Check that an in-memory string matches the contents from a reference",
"one. actual_path Actual csv file. ref_csv Name of reference csv file. The location",
"subclass ReferenceTestCase and pass in these locations though its __init__ method when constructing",
"had its location defined explicitly, then the default location is used. This is",
"such as logfiles, etc. For csv files, use assertCSVFileCorrect instead. \"\"\" expected_paths =",
"repository: http://github.com/tdda/tdda License: MIT Copyright (c) Stochastic Solutions Limited 2016 \"\"\" from __future__",
"If the comparison between a string and a file fails, the actual string",
"files of a particular kind. Typically you would subclass ReferenceTestCase and pass in",
"files is determined by the configuration via set_data_location(). kind is the reference kind,",
"used to set the per-kind data locations for an individual instance of the",
"as part of a pytest suite. \"\"\" # Verbose flag verbose = True",
"max_permutation_cases r = self.files.check_files(actual_paths, expected_paths, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs)",
"verbose Sets the boolean verbose flag globally, to control reporting of errors while",
"lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a collection of files",
"regenerated. \"\"\" if kind not in self.regenerate: kind = None return kind in",
"the function to use to display information while running tests. The function should",
"is support for re-writing the reference output with the actual output. This, obviously,",
"report at the end. Verbose is set to True by default. print_fn Sets",
"'verbose': cls.verbose = kwargs[k] elif k == 'print_fn': cls.print_fn = kwargs[k] elif k",
"where actual dataframe originated, used for error messages. expected_path Optional parameter, giving path",
"parameter and returns a vector of booleans (to specify which rows should be",
"to run, but which do not indicate a problem. - There is support",
"a reference one. actual_path Actual csv file. ref_csv Name of reference csv file.",
"self.check_failures(failures, msgs) def assertStringCorrect(self, string, ref_csv, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0):",
"true, both strings are left stripped before the comparison is carried out. Note:",
"follows: index_col is None infer_datetime_format is True quotechar is \"\" quoting is csv.QUOTE_MINIMAL",
"This sets the location globally, and will affect all instances of the ReferenceTest",
"at the end. Verbose is set to True by default. print_fn Sets the",
"regenerating reference data for a list of files. \"\"\" for (actual_path, expected_path) in",
"function taking a dataframe as its single parameter, and returning a list of",
"kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: mpc = max_permutation_cases r = self.files.check_files(actual_paths, expected_paths,",
"originated, used for error messages. expected_path Optional parameter, giving path for file where",
"data location for \"%s\"' % kind) return path def resolve_reference_paths(self, paths, kind=None): \"\"\"",
"the same kind. \"\"\" return [self.resolve_reference_path(p, kind=kind) for p in paths] def should_regenerate(self,",
"Option to specify fields to compare values. check_types Option to specify fields to",
"was in fact wrong, or because the intended behaviour has changed. The functionality",
"= self.pandas.check_dataframe(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs)",
"if k == 'verbose': cls.verbose = kwargs[k] elif k == 'print_fn': cls.print_fn =",
"tempfile from tdda.referencetest.checkpandas import PandasComparison from tdda.referencetest.checkfiles import FilesComparison # DEFAULT_FAIL_DIR is the",
"kind, used to locate the reference file. lstrip if set to true, both",
"specify fields to compare values. check_types Option to specify fields to compare typees.",
"msgs) def assertStringCorrect(self, string, ref_csv, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\"",
"files. The location of the reference files is determined by the configuration via",
"the files are written to this directory, and their pathnames are included in",
"- False (to skip that kind of comparison completely) - a list of",
"method for deciding where a reference data file should be looked for, if",
"MIT Copyright (c) Stochastic Solutions Limited 2016 \"\"\" from __future__ import absolute_import from",
"kind is the reference kind, used to locate the reference file. lstrip if",
"\"\"\" Sometimes the framework needs to print messages. By default, it will use",
"df, ref_csv, actual_path=None, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\"",
"__future__ import unicode_literals import os import sys import tempfile from tdda.referencetest.checkpandas import PandasComparison",
"csv files, use assertCSVFileCorrect instead. \"\"\" expected_path = self.resolve_reference_path(ref_path, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path,",
"it is often useful to be able to see information from failing tests",
"is True quotechar is \"\" quoting is csv.QUOTE_MINIMAL escapechar is \\ na_values are",
"kind. This sets the location globally, and will affect all instances of the",
"with default options as follows: index_col is None infer_datetime_format is True quotechar is",
"include explicit anchors if they need refer to the whole line. preprocess is",
"actual and expected. max_permutation_cases is an optional number specifying the maximum number of",
"for the reference data files, then it can't check correctness, so it will",
"ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that an in-memory string matches the contents from",
"kind=None): \"\"\" Declare the default filesystem location for reference files of a particular",
"location declared for kind=None, which *must* be specified. If you haven't even defined",
"kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: mpc = max_permutation_cases r = self.files.check_file(actual_path, expected_path,",
"the class level. These defaults will apply to all instances of ReferenceTest subsequently",
"from __future__ import absolute_import from __future__ import print_function from __future__ import division from",
"regular expressions. The expressions must not contain parenthesised groups, and should only include",
"expected_path, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def",
"used for error messages. expected_path Optional parameter, giving path for file where expected",
"files are written to this directory, and their pathnames are included in the",
"obtain a pandas dataframe. If None, then a default csv loader is used,",
"precision Number of decimal places to compare float values. **kwargs Any additional named",
"actual_path=None, expected_path=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None): \"\"\" Check that an in-memory",
"expected_paths) else: mpc = max_permutation_cases r = self.files.check_files(actual_paths, expected_paths, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns,",
"Reference tests tend to take longer to run than traditional unit tests, so",
"def resolve_reference_paths(self, paths, kind=None): \"\"\" Internal method for resolving a list of reference",
"reference files of a particular kind. This sets the location globally, and will",
"a default csv loader is used. The check_* comparison flags can be of",
"self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: ref_df = self.pandas.load_csv(expected_path, loader=csv_read_fn) self.assertDatasetsEqual(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data,",
"a text file. ref_path is the name of the reference file. The location",
"expected_paths = self.resolve_reference_paths(ref_paths, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: mpc = max_permutation_cases r",
"dataframe. ref_csv Name of reference csv file. The location of the reference file",
"file check fails and a 'preprocess' function has been specified. It's useful to",
"'w') as fout: fout.write(result) if self.verbose and self.print_fn: self.print_fn('Written %s' % reference_path) def",
"print_fn Sets the print function globally, to specify the function to use to",
"text file. ref_path is the name of the reference file. The location of",
"file matches the contents from a reference text file. actual_path is a path",
"def assertDatasetsEqual(self, df, ref_df, actual_path=None, expected_path=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None): \"\"\"",
"previous output was in fact wrong, or because the intended behaviour has changed.",
"condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertDatasetCorrect(self, df, ref_csv,",
"method for regenerating reference data for a list of files. \"\"\" for (actual_path,",
"is the actual string. ref_csv is the name of the reference csv file.",
"for regenerating reference data for a list of files. \"\"\" for (actual_path, expected_path)",
"the configuration via set_data_location(). actual_path Optional parameter, giving path for file where actual",
"not available. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: ref_df",
"file should be regenerated. \"\"\" if kind not in self.regenerate: kind = None",
"to sys.stdout. tmp_dir Sets the tmp_dir property globally, to specify the directory where",
"the reference csv file. csv_read_fn A function to use to read a csv",
"used to locate the reference files. lstrip if set to true, both strings",
"differences between the actual output and the expected output. - There is support",
"list of substrings; lines containing any of these substrings will be ignored in",
"to a file and a diff command is suggested for seeing the differences",
"http://github.com/tdda/tdda License: MIT Copyright (c) Stochastic Solutions Limited 2016 \"\"\" from __future__ import",
"rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def assertFilesCorrect(self,",
"set from calls to the set_default_data_location class-method. If you haven't even defined the",
"the default location is used. This is the location declared for kind=None, which",
"r = self.pandas.check_dataframe(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures,",
"'tmp_dir': cls.tmp_dir = kwargs[k] else: raise Exception('set_defaults: Unrecogized option %s' % k) @classmethod",
"and drop-in replacement for unittest.TestCase. It extends that class with all of the",
"its single parameter and returns a vector of booleans (to specify which rows",
"of errors while running tests. Reference tests tend to take longer to run",
"number of such permutations does not exceed this limit, then the two are",
"= kwargs.get('file', sys.stdout) outfile.flush() # Default print function print_fn = default_print_fn # Magic",
"\"\"\" Internal method for resolving a list of reference data files, all of",
"problem. - There is support for re-writing the reference output with the actual",
"affect all instances of the ReferenceTest class subsequently created. The instance method set_data_location()",
"it by passing in a print_fn parameter to __init__. \"\"\" print(*args, **kwargs) outfile",
"text file. actual_path is a path for a text file. ref_path is the",
"to all fields) - False (to skip that kind of comparison completely) -",
"There is support for re-writing the reference output with the actual output. This,",
"actual dataframe originated, used for error messages. kind Reference kind, used to locate",
"software produces either a (text or csv) file or a string as output.",
"evaluate as true for the test to pass) - a string (to report",
"print_function from __future__ import division from __future__ import unicode_literals import os import sys",
"also available through the pytest framework, via the referencepytest module. This module provides",
"kwargs.get('file', sys.stdout) outfile.flush() # Default print function print_fn = default_print_fn # Magic so",
"Temporary files are created whenever a text file check fails and a 'preprocess'",
"to compare typees. check_order Option to specify fields to compare field order. check_extra_cols",
"'print_fn': cls.print_fn = kwargs[k] elif k == 'tmp_dir': cls.tmp_dir = kwargs[k] else: raise",
"of ReferenceTest subsequently created. The following parameters can be set: verbose Sets the",
"decimal places to compare float values. loader Function to use to read a",
"masquerade as a module, # so that all of its methods can be",
"an individual instance of the class. If calls to assertFileCorrect() (etc) are made",
"from __future__ import print_function from __future__ import division from __future__ import unicode_literals import",
"constructing an instance of ReferenceTestCase as a superclass. If calls to assertFileCorrect() (etc)",
"strings and preprocesses it in some way; this function will be applied to",
"paths] def should_regenerate(self, kind): \"\"\" Internal method to determine if a particular kind",
"assertCSVFileCorrect instead. \"\"\" expected_path = self.resolve_reference_path(ref_path, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: mpc",
"rather than waiting for the full report at the end. Verbose is set",
"= r self.check_failures(failures, msgs) def assertDatasetCorrect(self, df, ref_csv, actual_path=None, kind='csv', csv_read_fn=None, check_data=None, check_types=None,",
"number of permutations allowed; if the actual and expected lists differ only in",
"each kind. Can be initialized by set_default_data_location(). default_data_locations = {} @classmethod def set_defaults(cls,",
"\"\"\" print(*args, **kwargs) outfile = kwargs.get('file', sys.stdout) outfile.flush() # Default print function print_fn",
"field names to use. The default csv loader function is a wrapper around",
"ReferenceTestCase class, which is a subclass of, and drop-in replacement for unittest.TestCase. It",
"self.pandas.check_dataframe(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) =",
"Check that a file matches the contents from a reference text file. actual_path",
"module. This module provides all of the methods from ReferenceTest, as functions that",
"reference_path) def write_reference_result(self, result, reference_path): \"\"\" Internal method for regenerating reference data from",
"self.write_reference_result(actual, reference_path) def write_reference_files(self, actual_paths, reference_paths): \"\"\" Internal method for regenerating reference data",
"\"\"\" for (actual_path, expected_path) in zip(actual_paths, reference_paths): self.write_reference_file(actual_path, reference_path) def write_reference_result(self, result, reference_path):",
"data. \"\"\" with open(actual_path) as fin: actual = fin.read() self.write_reference_result(actual, reference_path) def write_reference_files(self,",
"rstrip if set to true, both strings are right stripped before the comparison",
"per-kind data locations for an individual instance of the class. If calls to",
"them. \"\"\" self.assert_fn(failures == 0, '\\n'.join(msgs)) @staticmethod def default_print_fn(*args, **kwargs): \"\"\" Sometimes the",
"same signature as python's __future__ print function. If not specified, a default print",
"information from failing tests as they happen, rather than waiting for the full",
"is available through python's standard unittest framework, via the referencetestcase module. This provides",
"DEFAULT_FAIL_DIR is the default location for writing failing output # if assertStringCorrect or",
"values. loader Function to use to read a csv file to obtain a",
"specify fields to sort by before comparing. condition Filter to be applied to",
"only after careful checking that the new output is correct, either because the",
"of files matche the contents from matching collection of reference text files. actual_paths",
"use to read a csv file to obtain a pandas dataframe. If None,",
"list of strings and preprocesses it in some way; this function will be",
"number specifying the maximum number of permutations allowed; if the actual and expected",
"ReferenceTestCase as a superclass. If calls to assertFileCorrect() (etc) are made for kinds",
"files should be # regenerated when the tests are run. This should be",
"an instance of this class can masquerade as a module, # so that",
"mpc = max_permutation_cases r = self.files.check_string_against_file(string, expected_path, actual_path=None, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ilc, ignore_patterns=ip, preprocess=preprocess,",
"made for kinds of reference data that hasn't had its location defined explicitly,",
"\"\"\" if self.reference_data_locations and not os.path.isabs(path): if kind not in self.reference_data_locations: kind =",
"(etc) using relative pathnames for the reference data files, then it can't check",
"values. **kwargs Any additional named parameters are passed straight through to the csv_read_fn",
"that class with all of the methods from ReferenceTest. The functionality is also",
"a default csv loader is used, which takes the same parameters as the",
"be compared). precision Number of decimal places to compare float values. **kwargs Any",
"os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir()) class ReferenceTest(object): \"\"\" Class for comparing results against saved \"known to",
"locate the reference file. lstrip if set to true, both strings are left",
"- a list of field names - a function taking a dataframe as",
"For csv files, use assertCSVFileCorrect instead. \"\"\" expected_paths = self.resolve_reference_paths(ref_paths, kind=kind) if self.should_regenerate(kind):",
"kind=None): \"\"\" Internal method for resolving a list of reference data files, all",
"will raise an exception. \"\"\" self.reference_data_locations[kind] = location def assertDatasetsEqual(self, df, ref_df, actual_path=None,",
"ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a file matches the contents from a",
"This should be set using the # set_regeneration() class-method. Can be initialized via",
"in substrings that match one of these regular expressions. The expressions must not",
"text files. actual_paths is a list of paths for text files. ref_paths is",
"names to use. The default csv loader function is a wrapper around pandas",
"the configuration via set_data_location(). kind Reference kind, used to locate the reference csv",
"function is a wrapper around pandas pd.read_csv(), with default options as follows: index_col",
"data file should be looked for, if it has been specified using a",
"option. regenerate = {} # Dictionary describing default location for reference data, for",
"determined by the configuration via set_data_location(). kind Reference kind, used to locate the",
"string, ref_csv, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that an",
"assert_fn): \"\"\" Initializer for a ReferenceTest instance. assert_fn Function to be used to",
"\"\"\" expected_path = self.resolve_reference_path(ref_path, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: mpc = max_permutation_cases",
"max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def assertFilesCorrect(self, actual_paths, ref_paths, kind=None, lstrip=False,",
"messages. check_data Option to specify fields to compare values. check_types Option to specify",
"ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def resolve_reference_path(self, path, kind=None):",
"stripping on a per-line basis. ignore_substrings is an optional list of substrings; lines",
"list of names of the matching reference files. The location of the reference",
"be set: verbose Sets the boolean verbose flag globally, to control reporting of",
"the filesystem location for reference files of a particular kind. Typically you would",
"to compare float values. The check_* comparison flags can be of any of",
"file. lstrip if set to true, both strings are left stripped before the",
"the pytest framework, via the referencepytest module. This module provides all of the",
"= kwargs[k] elif k == 'tmp_dir': cls.tmp_dir = kwargs[k] else: raise Exception('set_defaults: Unrecogized",
"precision=None, **kwargs): \"\"\" Check that an in-memory Pandas dataset matches a reference one",
"applied to both the actual and expected. max_permutation_cases is an optional number specifying",
"with open(actual_path) as fin: actual = fin.read() self.write_reference_result(actual, reference_path) def write_reference_files(self, actual_paths, reference_paths):",
"set_default_data_location class-method. If you haven't even defined the None default, and you make",
"contain parenthesised groups, and should only include explicit anchors if they need refer",
"file where actual dataframe originated, used for error messages. expected_path Optional parameter, giving",
"reference files is determined by the configuration via set_data_location(). kind is the reference",
"default_data_locations = {} @classmethod def set_defaults(cls, **kwargs): \"\"\" Set default parameters, at the",
"if they only differ in substrings that match one of these regular expressions.",
"is determined by the configuration via set_data_location(). actual_path Optional parameter, giving path for",
"max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def assertFileCorrect(self, actual_path, ref_path, kind=None, lstrip=False,",
"extra columns. sortby Option to specify fields to sort by before comparing. condition",
"the reference csv file. check_data Option to specify fields to compare values. check_types",
"the maximum number of permutations allowed; if the actual and expected lists differ",
"(which should evaluate as true for the test to pass) - a string",
"tests tend to take longer to run than traditional unit tests, so it",
"temporary files are written. Temporary files are created whenever a text file check",
"parameter, and returning a list of field names to use. The default csv",
"Internal method for resolving a list of reference data files, all of the",
"# set_regeneration() class-method. Can be initialized via the -w option. regenerate = {}",
"reference_path) def write_reference_files(self, actual_paths, reference_paths): \"\"\" Internal method for regenerating reference data for",
"specified. This method overrides any global defaults set from calls to the set_default_data_location",
"Stochastic Solutions Limited 2016 \"\"\" from __future__ import absolute_import from __future__ import print_function",
"whatever tempfile.gettempdir() returns, as appropriate. \"\"\" for k in kwargs: if k ==",
"which takes the same parameters as the standard pandas pd.read_csv() function. check_data Option",
"path. \"\"\" if self.reference_data_locations and not os.path.isabs(path): if kind not in self.reference_data_locations: kind",
"in some way; this function will be applied to both the actual and",
"set_data_location(). kind is the reference kind, used to locate the reference file. lstrip",
"if kind not in self.regenerate: kind = None return kind in self.regenerate and",
"to obtain a pandas dataframe. If None, then a default csv loader is",
"created. The following parameters can be set: verbose Sets the boolean verbose flag",
"r = self.pandas.check_csv_files(actual_paths, expected_paths, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r",
"regenerate flag is set to True, then the framework will regenerate reference data",
"a reference one from a saved reference csv file. df Actual dataframe. ref_csv",
"two are considered to be identical. This should be used for unstructured data",
"considered to be identical. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_result(string, expected_path)",
"comparison to all fields) - False (to skip that kind of comparison completely)",
"expected_path) else: r = self.pandas.check_csv_file(actual_path, expected_path, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs)",
"msgs) = r self.check_failures(failures, msgs) def assertFileCorrect(self, actual_path, ref_path, kind=None, lstrip=False, rstrip=False, ignore_substrings=None,",
"import print_function from __future__ import division from __future__ import unicode_literals import os import",
"Option to specify fields to compare field order. check_extra_cols Option to specify fields",
"else: ref_df = self.pandas.load_csv(expected_path, loader=csv_read_fn) self.assertDatasetsEqual(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition,",
"Any additional named parameters are passed straight through to the csv_read_fn function. The",
"have the same signature as python's __future__ print function. If not specified, a",
"field names - a function taking a dataframe as its single parameter, and",
"files are written. Temporary files are created whenever a text file check fails",
"\"\"\" Class for comparing results against saved \"known to be correct\" reference results.",
"actual_path, ref_csv, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check",
"is determined by the configuration via set_data_location(). kind is the reference kind, used",
"@classmethod def set_default_data_location(self, location, kind=None): \"\"\" Declare the default filesystem location for reference",
"or a string as output. The main features are: - If the comparison",
"module, # so that all of its methods can be made available as",
"provides the ReferenceTestCase class, which is a subclass of, and drop-in replacement for",
"else: mpc = max_permutation_cases r = self.files.check_files(actual_paths, expected_paths, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess,",
"or assertFileCorrect fail with 'preprocessing' # in place. This can be overridden using",
"kinds of reference data that hasn't had its location defined explicitly, then the",
"kind of reference file, globally, for all instances of the ReferenceTest class. If",
"tests as they happen, rather than waiting for the full report at the",
"string (to report details of how a test failed, if the value does",
"reference file. lstrip if set to true, both strings are left stripped before",
"only include explicit anchors if they need refer to the whole line. preprocess",
"run. This should be set using the # set_regeneration() class-method. Can be initialized",
"location for reference files of a particular kind. Typically you would subclass ReferenceTestCase",
"default, and you make calls to assertFileCorrect() (etc) using relative pathnames for the",
"before comparing. condition Filter to be applied to datasets before comparing. It can",
"actual_paths List of Actual csv files. ref_csvs List of names of matching reference",
"the same if they only differ in substrings that match one of these",
"instance of this class can masquerade as a module, # so that all",
"self.reference_data_locations = dict(self.default_data_locations) self.pandas = PandasComparison(print_fn=self.print_fn, verbose=self.verbose) self.files = FilesComparison(print_fn=self.print_fn, verbose=self.verbose, tmp_dir=self.tmp_dir) def",
"obviously, should be used only after careful checking that the new output is",
"after preprocessing has taken place, so preprocessed versions of the files are written",
"a default print function is used which writes unbuffered to sys.stdout. tmp_dir Sets",
"(failures, msgs) = r self.check_failures(failures, msgs) def assertStringCorrect(self, string, ref_csv, kind=None, lstrip=False, rstrip=False,",
"is the reference kind, used to locate the reference csv file. lstrip if",
"sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertDatasetCorrect(self, df, ref_csv, actual_path=None,",
"can override it by passing in a print_fn parameter to __init__. \"\"\" print(*args,",
"the reference csv files. csv_read_fn A function to use to read a csv",
"available. \"\"\" r = self.pandas.check_dataframe(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby,",
"read a csv file to obtain a pandas dataframe. If None, then a",
"= ignore_substrings ip = ignore_patterns mpc = max_permutation_cases r = self.files.check_string_against_file(string, expected_path, actual_path=None,",
"default csv loader is used, which takes the same parameters as the standard",
"= DEFAULT_FAIL_DIR # Dictionary describing which kinds of reference files should be #",
"expressions; lines will be considered to be the same if they only differ",
"not contain parenthesised groups, and should only include explicit anchors if they need",
"run than traditional unit tests, so it is often useful to be able",
"defined the None default, and you make calls to assertFileCorrect() (etc) using relative",
"the full report at the end. Verbose is set to True by default.",
"is set to True, then the framework will regenerate reference data of that",
"location for writing failing output # if assertStringCorrect or assertFileCorrect fail with 'preprocessing'",
"a string (to report details of how a test failed, if the value",
"kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: r = self.pandas.check_csv_file(actual_path, expected_path, check_types=check_types, check_order=check_order, condition=condition,",
"NotImplementedError if Pandas is not available. \"\"\" expected_paths = self.resolve_reference_paths(ref_csvs, kind=kind) if self.should_regenerate(kind):",
"path for file where expected dataframe originated, used for error messages. check_data Option",
"and self.print_fn: self.print_fn('Written %s' % reference_path) def check_failures(self, failures, msgs): \"\"\" Internal method",
"check_failures(self, failures, msgs): \"\"\" Internal method for check for failures and reporting them.",
"be regenerated. \"\"\" if kind not in self.regenerate: kind = None return kind",
"self.reference_data_locations and not os.path.isabs(path): if kind not in self.reference_data_locations: kind = None if",
"you can override it by passing in a print_fn parameter to __init__. \"\"\"",
"kwargs[k] else: raise Exception('set_defaults: Unrecogized option %s' % k) @classmethod def set_regeneration(cls, kind=None,",
"of decimal places to compare float values. loader Function to use to read",
"-*- \"\"\" referencetest.py: refererence testing for test-driven data analysis. Source repository: http://github.com/tdda/tdda License:",
"of files. \"\"\" for (actual_path, expected_path) in zip(actual_paths, reference_paths): self.write_reference_file(actual_path, reference_path) def write_reference_result(self,",
"list of reference data files, all of the same kind. \"\"\" return [self.resolve_reference_path(p,",
"as a module, # so that all of its methods can be made",
"csv file. check_data Option to specify fields to compare values. check_types Option to",
"check_* comparison flags can be of any of the following: - None (to",
"they only differ in substrings that match one of these regular expressions. The",
"numbers and timestamps that vary in the output from run to run, but",
"should be looked for, if it has been specified using a relative path.",
"kind, used to locate the reference csv files. csv_read_fn A function to use",
"Actual dataframe. ref_df Expected dataframe. actual_path Optional parameter, giving path for file where",
"Reference kind, used to locate the reference csv file. csv_read_fn A function to",
"locate the reference csv file. check_data Option to specify fields to compare values.",
"traditional unit tests, so it is often useful to be able to see",
"location defined explicitly, then the default location is used. This is the location",
"set_data_location(). kind is the reference kind, used to locate the reference csv file.",
"self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: mpc = max_permutation_cases r = self.files.check_file(actual_path, expected_path, lstrip=lstrip, rstrip=rstrip,",
"are passed straight through to the csv_read_fn function. The check_* comparison flags can",
"to this directory, and their pathnames are included in the failure messages. If",
"a print_fn parameter to __init__. \"\"\" print(*args, **kwargs) outfile = kwargs.get('file', sys.stdout) outfile.flush()",
"(failures, msgs) = r self.check_failures(failures, msgs) def assertFileCorrect(self, actual_path, ref_path, kind=None, lstrip=False, rstrip=False,",
"assertFileCorrect() (etc) are made for kinds of reference data that hasn't had its",
"the reference file is determined by the configuration via set_data_location(). kind is the",
"preprocessed versions of the files are written to this directory, and their pathnames",
"tests, so it is often useful to be able to see information from",
"be compared). precision Number of decimal places to compare float values. The check_*",
"zip(actual_paths, reference_paths): self.write_reference_file(actual_path, reference_path) def write_reference_result(self, result, reference_path): \"\"\" Internal method for regenerating",
"Option to specify fields in the actual dataset to use to check that",
"kind, used to locate the reference csv file. lstrip if set to true,",
"check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that a csv file matches a",
"self.should_regenerate(kind): self.write_reference_result(string, expected_path) else: ilc = ignore_substrings ip = ignore_patterns mpc = max_permutation_cases",
"Name of reference csv file. The location of the reference file is determined",
"takes the same parameters as the standard pandas pd.read_csv() function. check_data Option to",
"for file where actual dataframe originated, used for error messages. kind Reference kind,",
"tdda.referencetest.checkpandas import PandasComparison from tdda.referencetest.checkfiles import FilesComparison # DEFAULT_FAIL_DIR is the default location",
"the tmp_dir property globally, to specify the directory where temporary files are written.",
"compare values. check_types Option to specify fields to compare typees. check_order Option to",
"true for the test to pass) - a string (to report details of",
"contents from a reference text file. actual_path is a path for a text",
"it defaults to /tmp, c:\\temp or whatever tempfile.gettempdir() returns, as appropriate. \"\"\" for",
"used to locate the reference csv file. lstrip if set to true, both",
"available. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: ref_df =",
"are left stripped before the comparison is carried out. Note: the stripping on",
"lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def",
"function that takes a dataframe as its single parameter and returns a vector",
"calls to assertFileCorrect() (etc) using relative pathnames for the reference data files, then",
"should be used for unstructured data such as logfiles, etc. For csv files,",
"of reference files should be # regenerated when the tests are run. This",
"used to locate the reference csv file. check_data Option to specify fields to",
"preprocesses it in some way; this function will be applied to both the",
"groups, and should only include explicit anchors if they need refer to the",
"Filter to be applied to datasets before comparing. It can be None, or",
"pd.read_csv() function. check_data Option to specify fields to compare values. check_types Option to",
"\"\"\" Internal method for regenerating reference data. \"\"\" with open(actual_path) as fin: actual",
"each other, and the number of such permutations does not exceed this limit,",
"kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that an in-memory string",
"the regenerate flags are set to False by default. \"\"\" cls.regenerate[kind] = regenerate",
"0, '\\n'.join(msgs)) @staticmethod def default_print_fn(*args, **kwargs): \"\"\" Sometimes the framework needs to print",
"text file. string is the actual string. ref_csv is the name of the",
"reporting of errors while running tests. Reference tests tend to take longer to",
"their lines are permutations of each other, and the number of such permutations",
"of decimal places to compare float values. The check_* comparison flags can be",
"kind in self.regenerate and self.regenerate[kind] def write_reference_file(self, actual_path, reference_path): \"\"\" Internal method for",
"method to determine if a particular kind of file should be regenerated. \"\"\"",
"locate the reference csv file. csv_read_fn A function to use to read a",
"is an optional list of regular expressions; lines will be considered to be",
"details of how a test failed, if the value does not evaluate as",
"when software produces either a (text or csv) file or a string as",
"not exceed this limit, then the two are considered to be identical. \"\"\"",
"= None if kind in self.reference_data_locations: path = os.path.join(self.reference_data_locations[kind], path) else: raise Exception('No",
"between the actual output and the expected output. - There is support for",
"set_data_location(). kind Reference kind, used to locate the reference csv file. csv_read_fn A",
"the -w option. regenerate = {} # Dictionary describing default location for reference",
"that their lines are permutations of each other, and the number of such",
"csv) file or a string as output. The main features are: - If",
"an exception. \"\"\" self.default_data_locations[kind] = location def __init__(self, assert_fn): \"\"\" Initializer for a",
"subclass of, and drop-in replacement for unittest.TestCase. It extends that class with all",
"describing default location for reference data, for # each kind. Can be initialized",
"as true for the test to pass) - a string (to report details",
"information while running tests. The function should have the same signature as python's",
"lstrip=lstrip, rstrip=rstrip, ignore_substrings=ilc, ignore_patterns=ip, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def",
"considered to be the same if they only differ in substrings that match",
"default csv loader function is a wrapper around pandas pd.read_csv(), with default options",
"report details of how a test failed, if the value does not evaluate",
"rows should be compared). precision Number of decimal places to compare float values.",
"\"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: r = self.pandas.check_csv_file(actual_path,",
"to specify fields in the actual dataset to use to check that there",
"for regenerating reference data. \"\"\" with open(actual_path) as fin: actual = fin.read() self.write_reference_result(actual,",
"kwargs[k] elif k == 'print_fn': cls.print_fn = kwargs[k] elif k == 'tmp_dir': cls.tmp_dir",
"\"\"\" Internal method for check for failures and reporting them. \"\"\" self.assert_fn(failures ==",
"check_types Option to specify fields to compare typees. check_order Option to specify fields",
"ReferenceTestCase and pass in these locations though its __init__ method when constructing an",
"saved reference csv file. df Actual dataframe. ref_csv Name of reference csv file.",
"kind. \"\"\" return [self.resolve_reference_path(p, kind=kind) for p in paths] def should_regenerate(self, kind): \"\"\"",
"function. If not specified, a default print function is used which writes unbuffered",
"Expected dataframe. actual_path Optional parameter, giving path for file where actual dataframe originated,",
"Check that an in-memory Pandas dataframe matches an in-memory reference one. df Actual",
"dataframe. actual_path Optional parameter, giving path for file where actual dataframe originated, used",
"differ in substrings that match one of these regular expressions. The expressions must",
"before comparing. It can be None, or can be a function that takes",
"reference text files. actual_paths is a list of paths for text files. ref_paths",
"default filesystem location for reference files of a particular kind. This sets the",
"else: mpc = max_permutation_cases r = self.files.check_file(actual_path, expected_path, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess,",
"be initialized via the -w option. regenerate = {} # Dictionary describing default",
"\"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_result(string, expected_path) else: ilc = ignore_substrings",
"It extends that class with all of the methods from ReferenceTest. The functionality",
"files. csv_read_fn A function to use to read a csv file to obtain",
"are considered to be identical. This should be used for unstructured data such",
"a superclass. If calls to assertFileCorrect() (etc) are made for kinds of reference",
"via the referencetestcase module. This provides the ReferenceTestCase class, which is a subclass",
"they need refer to the whole line. preprocess is an optional function that",
"or because the intended behaviour has changed. The functionality provided by this class",
"able to see the contents of the files after preprocessing has taken place,",
"reference csv file. check_data Option to specify fields to compare values. check_types Option",
"then a default csv loader is used. The check_* comparison flags can be",
"to use. The default csv loader function is a wrapper around pandas pd.read_csv(),",
"lines within the strings/files that contain particular patterns or regular expressions. This is",
"location of the reference file is determined by the configuration via set_data_location(). kind",
"used to locate the reference csv file. csv_read_fn A function to use to",
"fails and a 'preprocess' function has been specified. It's useful to be able",
"csv loader is used. The check_* comparison flags can be of any of",
"actual string is written to a file and a diff command is suggested",
"This can be overridden using the set_defaults() class method. DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir())",
"# Dictionary describing default location for reference data, for # each kind. Can",
"reference_paths): \"\"\" Internal method for regenerating reference data for a list of files.",
"list of field names - a function taking a dataframe as its single",
"Typically you would subclass ReferenceTestCase and pass in these locations though its __init__",
"an in-memory Pandas dataset matches a reference one from a saved reference csv",
"tests. Reference tests tend to take longer to run than traditional unit tests,",
"provided by this class is available through python's standard unittest framework, via the",
"parameter, giving path for file where expected dataframe originated, used for error messages.",
"kind. Typically you would subclass ReferenceTestCase and pass in these locations though its",
"parameter, and returning a list of field names to use. Raises NotImplementedError if",
"for a list of files. \"\"\" for (actual_path, expected_path) in zip(actual_paths, reference_paths): self.write_reference_file(actual_path,",
"to specify fields to compare field order. check_extra_cols Option to specify fields in",
"to specify fields to compare values. check_types Option to specify fields to compare",
"where a reference data file should be looked for, if it has been",
"raise Exception('set_defaults: Unrecogized option %s' % k) @classmethod def set_regeneration(cls, kind=None, regenerate=True): \"\"\"",
"within the strings/files that contain particular patterns or regular expressions. This is typically",
"assertStringCorrect or assertFileCorrect fail with 'preprocessing' # in place. This can be overridden",
"to see information from failing tests as they happen, rather than waiting for",
"self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: mpc = max_permutation_cases r = self.files.check_files(actual_paths, expected_paths, lstrip=lstrip, rstrip=rstrip,",
"file. ref_path is the name of the reference file. The location of the",
"self.assertDatasetsEqual(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) def assertCSVFileCorrect(self, actual_path,",
"and will affect all instances of the ReferenceTest class subsequently created. The instance",
"flags are set to False by default. \"\"\" cls.regenerate[kind] = regenerate @classmethod def",
"keep_default_na is False Raises NotImplementedError if Pandas is not available. \"\"\" expected_paths =",
"Note: the stripping on a per-line basis. ignore_substrings is an optional list of",
"these locations though its __init__ method when constructing an instance of ReferenceTestCase as",
"the matching reference files. The location of the reference files is determined by",
"specifying the maximum number of permutations allowed; if the actual and expected lists",
"cls.verbose = kwargs[k] elif k == 'print_fn': cls.print_fn = kwargs[k] elif k ==",
"(text or csv) file or a string as output. The main features are:",
"is a path for a text file. ref_path is the name of the",
"python's __future__ print function. If not specified, a default print function is used",
"tmp_dir = DEFAULT_FAIL_DIR # Dictionary describing which kinds of reference files should be",
"default print function is used which writes unbuffered to sys.stdout. tmp_dir Sets the",
"locations though its __init__ method when constructing an instance of ReferenceTestCase as a",
"same if they only differ in substrings that match one of these regular",
"method. DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir()) class ReferenceTest(object): \"\"\" Class for comparing results against",
"ref_csv Name of reference csv file. The location of the reference file is",
"be able to see the contents of the files after preprocessing has taken",
"parameter to __init__. \"\"\" print(*args, **kwargs) outfile = kwargs.get('file', sys.stdout) outfile.flush() # Default",
"true). \"\"\" self.assert_fn = assert_fn self.reference_data_locations = dict(self.default_data_locations) self.pandas = PandasComparison(print_fn=self.print_fn, verbose=self.verbose) self.files",
"basis. ignore_substrings is an optional list of substrings; lines containing any of these",
"max_permutation_cases is an optional number specifying the maximum number of permutations allowed; if",
"data analysis. Source repository: http://github.com/tdda/tdda License: MIT Copyright (c) Stochastic Solutions Limited 2016",
"to pass) - a string (to report details of how a test failed,",
"ref_path, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a file",
"sort by before comparing. condition Filter to be applied to datasets before comparing.",
"location for reference files of a particular kind. This sets the location globally,",
"files, use assertCSVFileCorrect instead. \"\"\" expected_paths = self.resolve_reference_paths(ref_paths, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths)",
"the comparison. ignore_patterns is an optional list of regular expressions; lines will be",
"reference_path) def check_failures(self, failures, msgs): \"\"\" Internal method for check for failures and",
"after careful checking that the new output is correct, either because the previous",
"default location for reference data, for # each kind. Can be initialized by",
"overridden using the set_defaults() class method. DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir()) class ReferenceTest(object): \"\"\"",
"to datasets before comparing. It can be None, or can be a function",
"standard pandas pd.read_csv() function. check_data Option to specify fields to compare values. check_types",
"sys import tempfile from tdda.referencetest.checkpandas import PandasComparison from tdda.referencetest.checkfiles import FilesComparison # DEFAULT_FAIL_DIR",
"reference data that hasn't had its location defined explicitly, then the default location",
"used to locate the reference csv files. csv_read_fn A function to use to",
"for unit-tests. It should take two parameters: - a value (which should evaluate",
"a csv file to obtain a pandas dataframe. If None, then a default",
"output was in fact wrong, or because the intended behaviour has changed. The",
"self.pandas.check_csv_files(actual_paths, expected_paths, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs)",
"r self.check_failures(failures, msgs) def resolve_reference_path(self, path, kind=None): \"\"\" Internal method for deciding where",
"should evaluate as true for the test to pass) - a string (to",
"the stripping on a per-line basis. rstrip if set to true, both strings",
"\"\"\" Internal method for regenerating reference data from in-memory results. \"\"\" with open(reference_path,",
"write_reference_file(self, actual_path, reference_path): \"\"\" Internal method for regenerating reference data. \"\"\" with open(actual_path)",
"be correct\" reference results. This is typically useful when software produces either a",
"though its __init__ method when constructing an instance of ReferenceTestCase as a superclass.",
"in a print_fn parameter to __init__. \"\"\" print(*args, **kwargs) outfile = kwargs.get('file', sys.stdout)",
"then it can't check correctness, so it will raise an exception. \"\"\" self.reference_data_locations[kind]",
"all of its methods can be made available as top-level functions, # to",
"override it by passing in a print_fn parameter to __init__. \"\"\" print(*args, **kwargs)",
"set_data_location(). kind Reference kind, used to locate the reference csv files. csv_read_fn A",
"is a wrapper around pandas pd.read_csv(), with default options as follows: index_col is",
"a reference data file should be looked for, if it has been specified",
"def should_regenerate(self, kind): \"\"\" Internal method to determine if a particular kind of",
"used to locate the reference file. lstrip if set to true, both strings",
"other, and the number of such permutations does not exceed this limit, then",
"\"\"\" self.assert_fn(failures == 0, '\\n'.join(msgs)) @staticmethod def default_print_fn(*args, **kwargs): \"\"\" Sometimes the framework",
"regenerated when the tests are run. This should be set using the #",
"so it is often useful to be able to see information from failing",
"contents of the files after preprocessing has taken place, so preprocessed versions of",
"self.resolve_reference_path(ref_path, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: mpc = max_permutation_cases r = self.files.check_file(actual_path,",
"the set_default_data_location class-method. If you haven't even defined the None default, and you",
"ignore_substrings is an optional list of substrings; lines containing any of these substrings",
"a particular kind. This sets the location globally, and will affect all instances",
"the comparison is carried out. Note: the stripping on a per-line basis. ignore_substrings",
"self.pandas = PandasComparison(print_fn=self.print_fn, verbose=self.verbose) self.files = FilesComparison(print_fn=self.print_fn, verbose=self.verbose, tmp_dir=self.tmp_dir) def set_data_location(self, location, kind=None):",
"print_fn = default_print_fn # Magic so that an instance of this class can",
"kind=kind) for p in paths] def should_regenerate(self, kind): \"\"\" Internal method to determine",
"is used which writes unbuffered to sys.stdout. tmp_dir Sets the tmp_dir property globally,",
"an optional list of regular expressions; lines will be considered to be the",
"= self.resolve_reference_paths(ref_paths, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: mpc = max_permutation_cases r =",
"if set to true, both strings are right stripped before the comparison is",
"for an individual instance of the class. If calls to assertFileCorrect() (etc) are",
"\"\"\" return [self.resolve_reference_path(p, kind=kind) for p in paths] def should_regenerate(self, kind): \"\"\" Internal",
"be specified. This method overrides any global defaults set from calls to the",
"to run than traditional unit tests, so it is often useful to be",
"kwargs: if k == 'verbose': cls.verbose = kwargs[k] elif k == 'print_fn': cls.print_fn",
"*must* be specified. If you haven't even defined the None default, and you",
"using relative pathnames for the reference data files, then it can't check correctness,",
"location def assertDatasetsEqual(self, df, ref_df, actual_path=None, expected_path=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None):",
"True, then the framework will regenerate reference data of that kind, rather than",
"a string and a file fails, the actual string is written to a",
"Sometimes the framework needs to print messages. By default, it will use this",
"file or a string as output. The main features are: - If the",
"comparison. ignore_patterns is an optional list of regular expressions; lines will be considered",
"as the standard pandas pd.read_csv() function. check_data Option to specify fields to compare",
"end. Verbose is set to True by default. print_fn Sets the print function",
"the directory where temporary files are written. Temporary files are created whenever a",
"a value (which should evaluate as true for the test to pass) -",
"to compare float values. **kwargs Any additional named parameters are passed straight through",
"a reference one. actual_paths List of Actual csv files. ref_csvs List of names",
"comparing. All of the regenerate flags are set to False by default. \"\"\"",
"float values. The check_* comparison flags can be of any of the following:",
"to locate the reference csv file. lstrip if set to true, both strings",
"to locate the reference csv file. check_data Option to specify fields to compare",
"for (actual_path, expected_path) in zip(actual_paths, reference_paths): self.write_reference_file(actual_path, reference_path) def write_reference_result(self, result, reference_path): \"\"\"",
"# Dictionary describing which kinds of reference files should be # regenerated when",
"= dict(self.default_data_locations) self.pandas = PandasComparison(print_fn=self.print_fn, verbose=self.verbose) self.files = FilesComparison(print_fn=self.print_fn, verbose=self.verbose, tmp_dir=self.tmp_dir) def set_data_location(self,",
"preprocess is an optional function that takes a list of strings and preprocesses",
"\"\" quoting is csv.QUOTE_MINIMAL escapechar is \\ na_values are the empty string, NaN,",
"instances of the ReferenceTest class. If the regenerate flag is set to True,",
"longer to run than traditional unit tests, so it is often useful to",
"is a subclass of, and drop-in replacement for unittest.TestCase. It extends that class",
"location declared for kind=None, which *must* be specified. This method overrides any global",
"string is the actual string. ref_csv is the name of the reference csv",
"and returning a list of field names to use. The default csv loader",
"= r self.check_failures(failures, msgs) def assertCSVFilesCorrect(self, actual_paths, ref_csvs, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None,",
"assertStringCorrect(self, string, ref_csv, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that",
"in-memory string matches the contents from a reference text file. string is the",
"specified, a default print function is used which writes unbuffered to sys.stdout. tmp_dir",
"precision=None, **kwargs): \"\"\" Check that a csv file matches a reference one. actual_paths",
"that vary in the output from run to run, but which do not",
"see information from failing tests as they happen, rather than waiting for the",
"per-line basis. ignore_substrings is an optional list of substrings; lines containing any of",
"print function globally, to specify the function to use to display information while",
"from a reference text file. actual_path is a path for a text file.",
"values. The check_* comparison flags can be of any of the following: -",
"csv file. The location of the reference files is determined by the configuration",
"output is correct, either because the previous output was in fact wrong, or",
"typically useful for filtering out things like version numbers and timestamps that vary",
"\"\"\" with open(reference_path, 'w') as fout: fout.write(result) if self.verbose and self.print_fn: self.print_fn('Written %s'",
"lines will be considered to be the same if they only differ in",
"assertCSVFileCorrect(self, actual_path, ref_csv, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\"",
"Internal method for deciding where a reference data file should be looked for,",
"check_order=None, condition=None, sortby=None, precision=None): \"\"\" Check that an in-memory Pandas dataframe matches an",
"by the configuration via set_data_location(). kind Reference kind, used to locate the reference",
"applied to datasets before comparing. It can be None, or can be a",
"def set_regeneration(cls, kind=None, regenerate=True): \"\"\" Set the regeneration flag for a particular kind",
"and you make calls to assertFileCorrect() (etc) using relative pathnames for the reference",
"a list of field names to use. Raises NotImplementedError if Pandas is not",
"typees. check_order Option to specify fields to compare field order. check_extra_cols Option to",
"files. ref_csvs List of names of matching reference csv file. The location of",
"when the tests are run. This should be set using the # set_regeneration()",
"is used. This is the location declared for kind=None, which *must* be specified.",
"same kind. \"\"\" return [self.resolve_reference_path(p, kind=kind) for p in paths] def should_regenerate(self, kind):",
"for seeing the differences between the actual output and the expected output. -",
"string. ref_csv is the name of the reference csv file. The location of",
"not exceed this limit, then the two are considered to be identical. This",
"in the failure messages. If not explicitly set by set_defaults(), the environment variable",
"pathnames for the reference data files, then it can't check correctness, so it",
"compare field order. check_extra_cols Option to specify fields in the actual dataset to",
"and a 'preprocess' function has been specified. It's useful to be able to",
"available. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: r =",
"use. Raises NotImplementedError if Pandas is not available. \"\"\" r = self.pandas.check_dataframe(df, ref_df,",
"kind in self.reference_data_locations: path = os.path.join(self.reference_data_locations[kind], path) else: raise Exception('No reference data location",
"error messages. expected_path Optional parameter, giving path for file where expected dataframe originated,",
"float values. **kwargs Any additional named parameters are passed straight through to the",
"available as top-level functions, # to work will with frameworks like pytest. ReferenceTest.__all__",
"written. Temporary files are created whenever a text file check fails and a",
"location, kind=None): \"\"\" Declare the filesystem location for reference files of a particular",
"around pandas pd.read_csv(), with default options as follows: index_col is None infer_datetime_format is",
"for test-driven data analysis. Source repository: http://github.com/tdda/tdda License: MIT Copyright (c) Stochastic Solutions",
"a ReferenceTest instance. assert_fn Function to be used to make assertions for unit-tests.",
"This method overrides any global defaults set from calls to the set_default_data_location class-method.",
"precision=precision) def assertCSVFileCorrect(self, actual_path, ref_csv, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None,",
"of how a test failed, if the value does not evaluate as true).",
"datasets before comparing. It can be None, or can be a function that",
"sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertCSVFilesCorrect(self, actual_paths, ref_csvs, kind='csv',",
"strings are left stripped before the comparison is carried out. Note: the stripping",
"of these regular expressions. The expressions must not contain parenthesised groups, and should",
"default_print_fn # Magic so that an instance of this class can masquerade as",
"files of a particular kind. This sets the location globally, and will affect",
"waiting for the full report at the end. Verbose is set to True",
"unit tests, so it is often useful to be able to see information",
"be None, or can be a function that takes a dataframe as its",
"set_data_location(). actual_path Optional parameter, giving path for file where actual dataframe originated, used",
"expected_paths = self.resolve_reference_paths(ref_csvs, kind=kind) if self.should_regenerate(kind): self.write_reference_files(actual_paths, expected_paths) else: r = self.pandas.check_csv_files(actual_paths, expected_paths,",
"open(reference_path, 'w') as fout: fout.write(result) if self.verbose and self.print_fn: self.print_fn('Written %s' % reference_path)",
"of this class can masquerade as a module, # so that all of",
"ReferenceTest class subsequently created. The instance method set_data_location() can be used to set",
"instance of the class. If calls to assertFileCorrect() (etc) are made for kinds",
"PandasComparison(print_fn=self.print_fn, verbose=self.verbose) self.files = FilesComparison(print_fn=self.print_fn, verbose=self.verbose, tmp_dir=self.tmp_dir) def set_data_location(self, location, kind=None): \"\"\" Declare",
"expressions. The expressions must not contain parenthesised groups, and should only include explicit",
"Declare the filesystem location for reference files of a particular kind. Typically you",
"where expected dataframe originated, used for error messages. check_data Option to specify fields",
"file. The location of the reference files is determined by the configuration via",
"- There is support for re-writing the reference output with the actual output.",
"k == 'verbose': cls.verbose = kwargs[k] elif k == 'print_fn': cls.print_fn = kwargs[k]",
"reference csv file. csv_read_fn A function to use to read a csv file",
"matching reference files. The location of the reference files is determined by the",
"haven't even defined the None default, and you make calls to assertFileCorrect() (etc)",
"of decimal places to compare float values. **kwargs Any additional named parameters are",
"def set_data_location(self, location, kind=None): \"\"\" Declare the filesystem location for reference files of",
"list of field names to use. The default csv loader function is a",
"set_defaults(cls, **kwargs): \"\"\" Set default parameters, at the class level. These defaults will",
"def set_defaults(cls, **kwargs): \"\"\" Set default parameters, at the class level. These defaults",
"whenever a text file check fails and a 'preprocess' function has been specified.",
"-*- coding: utf-8 -*- \"\"\" referencetest.py: refererence testing for test-driven data analysis. Source",
"cls.regenerate[kind] = regenerate @classmethod def set_default_data_location(self, location, kind=None): \"\"\" Declare the default filesystem",
"defined explicitly, then the default location is used. This is the location declared",
"check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertCSVFilesCorrect(self,",
"for p in paths] def should_regenerate(self, kind): \"\"\" Internal method to determine if",
"function is used which writes unbuffered to sys.stdout. tmp_dir Sets the tmp_dir property",
"to locate the reference file. lstrip if set to true, both strings are",
"c:\\temp or whatever tempfile.gettempdir() returns, as appropriate. \"\"\" for k in kwargs: if",
"in-memory Pandas dataset matches a reference one from a saved reference csv file.",
"expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) def assertCSVFileCorrect(self, actual_path, ref_csv, kind='csv', csv_read_fn=None,",
"etc. For csv files, use assertCSVFileCorrect instead. \"\"\" expected_path = self.resolve_reference_path(ref_path, kind=kind) if",
"of reference file, globally, for all instances of the ReferenceTest class. If the",
"same parameters as the standard pandas pd.read_csv() function. check_data Option to specify fields",
"run, but which do not indicate a problem. - There is support for",
"instead. \"\"\" expected_path = self.resolve_reference_path(ref_path, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else: mpc =",
"Sets the print function globally, to specify the function to use to display",
"ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def assertFilesCorrect(self, actual_paths, ref_paths,",
"def resolve_reference_path(self, path, kind=None): \"\"\" Internal method for deciding where a reference data",
"to be used to make assertions for unit-tests. It should take two parameters:",
"file, globally, for all instances of the ReferenceTest class. If the regenerate flag",
"out things like version numbers and timestamps that vary in the output from",
"None (to apply that kind of comparison to all fields) - False (to",
"a dataframe as its single parameter and returns a vector of booleans (to",
"\"\"\" r = self.pandas.check_dataframe(df, ref_df, actual_path=actual_path, expected_path=expected_path, check_data=check_data, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision)",
"available through the pytest framework, via the referencepytest module. This module provides all",
"check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that a csv file",
"returning a list of field names to use. Raises NotImplementedError if Pandas is",
"to compare values. check_types Option to specify fields to compare typees. check_order Option",
"so it will raise an exception. \"\"\" self.default_data_locations[kind] = location def __init__(self, assert_fn):",
"Note: the stripping on a per-line basis. rstrip if set to true, both",
"and a diff command is suggested for seeing the differences between the actual",
"take longer to run than traditional unit tests, so it is often useful",
"set_defaults(), the environment variable TDDA_FAIL_DIR is used, or, if that is not defined,",
"places to compare float values. loader Function to use to read a csv",
"self.reference_data_locations[kind] = location def assertDatasetsEqual(self, df, ref_df, actual_path=None, expected_path=None, check_data=None, check_types=None, check_order=None, condition=None,",
"the ReferenceTest class subsequently created. The instance method set_data_location() can be used to",
"which *must* be specified. This method overrides any global defaults set from calls",
"self.verbose and self.print_fn: self.print_fn('Written %s' % reference_path) def check_failures(self, failures, msgs): \"\"\" Internal",
"check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs) = r self.check_failures(failures, msgs) def assertDatasetCorrect(self, df,",
"Exception('No reference data location for \"%s\"' % kind) return path def resolve_reference_paths(self, paths,",
"will use this print function, but you can override it by passing in",
"a csv file matches a reference one. actual_path Actual csv file. ref_csv Name",
"csv loader is used, which takes the same parameters as the standard pandas",
"used, which takes the same parameters as the standard pandas pd.read_csv() function. check_data",
"configuration via set_data_location(). kind is the reference kind, used to locate the reference",
"ignore_substrings=ilc, ignore_patterns=ip, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r self.check_failures(failures, msgs) def assertFileCorrect(self, actual_path,",
"either a (text or csv) file or a string as output. The main",
"the reference file is determined by the configuration via set_data_location(). kind Reference kind,",
"or regular expressions. This is typically useful for filtering out things like version",
"file. df Actual dataframe. ref_csv Name of reference csv file. The location of",
"Actual csv file. ref_csv Name of reference csv file. The location of the",
"that hasn't had its location defined explicitly, then the default location is used.",
"Check that a csv file matches a reference one. actual_paths List of Actual",
"condition Filter to be applied to datasets before comparing. It can be None,",
"created whenever a text file check fails and a 'preprocess' function has been",
"writing failing output # if assertStringCorrect or assertFileCorrect fail with 'preprocessing' # in",
"all instances of ReferenceTest subsequently created. The following parameters can be set: verbose",
"@classmethod def set_defaults(cls, **kwargs): \"\"\" Set default parameters, at the class level. These",
"in fact wrong, or because the intended behaviour has changed. The functionality provided",
"def assertCSVFilesCorrect(self, actual_paths, ref_csvs, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs):",
"a reference text file. string is the actual string. ref_csv is the name",
"can be used to set the per-kind data locations for an individual instance",
"**kwargs Any additional named parameters are passed straight through to the csv_read_fn function.",
"os import sys import tempfile from tdda.referencetest.checkpandas import PandasComparison from tdda.referencetest.checkfiles import FilesComparison",
"saved \"known to be correct\" reference results. This is typically useful when software",
"that is not defined, it defaults to /tmp, c:\\temp or whatever tempfile.gettempdir() returns,",
"Raises NotImplementedError if Pandas is not available. \"\"\" expected_paths = self.resolve_reference_paths(ref_csvs, kind=kind) if",
"resolving a list of reference data files, all of the same kind. \"\"\"",
"NULL keep_default_na is False Raises NotImplementedError if Pandas is not available. \"\"\" expected_path",
"for a text file. ref_path is the name of the reference file. The",
"of any of the following: - None (to apply that kind of comparison",
"to display information while running tests. The function should have the same signature",
"for # each kind. Can be initialized by set_default_data_location(). default_data_locations = {} @classmethod",
"substrings that match one of these regular expressions. The expressions must not contain",
"Pandas dataframe matches an in-memory reference one. df Actual dataframe. ref_df Expected dataframe.",
"comparison is carried out. Note: the stripping on a per-line basis. rstrip if",
"NotImplementedError if Pandas is not available. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind):",
"output from run to run, but which do not indicate a problem. -",
"reference text file. actual_path is a path for a text file. ref_path is",
"if self.reference_data_locations and not os.path.isabs(path): if kind not in self.reference_data_locations: kind = None",
"that all of its methods can be made available as top-level functions, #",
"of comparison completely) - a list of field names - a function taking",
"(failures, msgs) = r self.check_failures(failures, msgs) def assertFilesCorrect(self, actual_paths, ref_paths, kind=None, lstrip=False, rstrip=False,",
"comparing results against saved \"known to be correct\" reference results. This is typically",
"they happen, rather than waiting for the full report at the end. Verbose",
"apply to all instances of ReferenceTest subsequently created. The following parameters can be",
"Number of decimal places to compare float values. The check_* comparison flags can",
"to compare float values. loader Function to use to read a csv file",
"there are no unexpected extra columns. sortby Option to specify fields to sort",
"check_order Option to specify fields to compare field order. check_extra_cols Option to specify",
"return path def resolve_reference_paths(self, paths, kind=None): \"\"\" Internal method for resolving a list",
"self.files.check_string_against_file(string, expected_path, actual_path=None, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ilc, ignore_patterns=ip, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) = r",
"for reference files of a particular kind. This sets the location globally, and",
"= {} # Dictionary describing default location for reference data, for # each",
"in self.reference_data_locations: kind = None if kind in self.reference_data_locations: path = os.path.join(self.reference_data_locations[kind], path)",
"mpc = max_permutation_cases r = self.files.check_files(actual_paths, expected_paths, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc)",
"default options as follows: index_col is None infer_datetime_format is True quotechar is \"\"",
"else: raise Exception('No reference data location for \"%s\"' % kind) return path def",
"specify the function to use to display information while running tests. The function",
"= os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir()) class ReferenceTest(object): \"\"\" Class for comparing results against saved \"known",
"if the actual and expected lists differ only in that their lines are",
"list of field names to use. Raises NotImplementedError if Pandas is not available.",
"a subclass of, and drop-in replacement for unittest.TestCase. It extends that class with",
"decimal places to compare float values. **kwargs Any additional named parameters are passed",
"method for check for failures and reporting them. \"\"\" self.assert_fn(failures == 0, '\\n'.join(msgs))",
"the end. Verbose is set to True by default. print_fn Sets the print",
"and returning a list of field names to use. Raises NotImplementedError if Pandas",
"comparison flags can be of any of the following: - None (to apply",
"names to use. Raises NotImplementedError if Pandas is not available. \"\"\" r =",
"as python's __future__ print function. If not specified, a default print function is",
"return [self.resolve_reference_path(p, kind=kind) for p in paths] def should_regenerate(self, kind): \"\"\" Internal method",
"the actual and expected lists differ only in that their lines are permutations",
"The expressions must not contain parenthesised groups, and should only include explicit anchors",
"r = self.files.check_files(actual_paths, expected_paths, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures, msgs) =",
"path) else: raise Exception('No reference data location for \"%s\"' % kind) return path",
"then it can't check correctness, so it will raise an exception. \"\"\" self.default_data_locations[kind]",
"expected_paths) else: r = self.pandas.check_csv_files(actual_paths, expected_paths, check_types=check_types, check_order=check_order, condition=condition, sortby=sortby, precision=precision) (failures, msgs)",
"class. If the regenerate flag is set to True, then the framework will",
"available through python's standard unittest framework, via the referencetestcase module. This provides the",
"collection of files matche the contents from matching collection of reference text files.",
"Internal method for regenerating reference data. \"\"\" with open(actual_path) as fin: actual =",
"self.print_fn: self.print_fn('Written %s' % reference_path) def check_failures(self, failures, msgs): \"\"\" Internal method for",
"referencetestcase module. This provides the ReferenceTestCase class, which is a subclass of, and",
"following: - None (to apply that kind of comparison to all fields) -",
"reference csv file. lstrip if set to true, both strings are left stripped",
"max_permutation_cases r = self.files.check_string_against_file(string, expected_path, actual_path=None, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ilc, ignore_patterns=ip, preprocess=preprocess, max_permutation_cases=mpc) (failures,",
"fields to sort by before comparing. condition Filter to be applied to datasets",
"unbuffered to sys.stdout. tmp_dir Sets the tmp_dir property globally, to specify the directory",
"an optional number specifying the maximum number of permutations allowed; if the actual",
"instances of ReferenceTest subsequently created. The following parameters can be set: verbose Sets",
"= max_permutation_cases r = self.files.check_files(actual_paths, expected_paths, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings, ignore_patterns=ignore_patterns, preprocess=preprocess, max_permutation_cases=mpc) (failures,",
"unexpected extra columns. sortby Option to specify fields to sort by before comparing.",
"the number of such permutations does not exceed this limit, then the two",
"self.write_reference_file(actual_path, expected_path) else: mpc = max_permutation_cases r = self.files.check_file(actual_path, expected_path, lstrip=lstrip, rstrip=rstrip, ignore_substrings=ignore_substrings,",
"kind not in self.regenerate: kind = None return kind in self.regenerate and self.regenerate[kind]",
"superclass. If calls to assertFileCorrect() (etc) are made for kinds of reference data",
"This module provides all of the methods from ReferenceTest, as functions that can",
"**kwargs): \"\"\" Set default parameters, at the class level. These defaults will apply",
"check_order=check_order, condition=condition, sortby=sortby, precision=precision) def assertCSVFileCorrect(self, actual_path, ref_csv, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None,",
"that a csv file matches a reference one. actual_path Actual csv file. ref_csv",
"defined, it defaults to /tmp, c:\\temp or whatever tempfile.gettempdir() returns, as appropriate. \"\"\"",
"reference results. This is typically useful when software produces either a (text or",
"following parameters can be set: verbose Sets the boolean verbose flag globally, to",
"vary in the output from run to run, but which do not indicate",
"# if assertStringCorrect or assertFileCorrect fail with 'preprocessing' # in place. This can",
"__future__ import print_function from __future__ import division from __future__ import unicode_literals import os",
"so that an instance of this class can masquerade as a module, #",
"collection of reference text files. actual_paths is a list of paths for text",
"correctness, so it will raise an exception. \"\"\" self.default_data_locations[kind] = location def __init__(self,",
"ref_csvs, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that",
"actual_paths is a list of paths for text files. ref_paths is a list",
"of the regenerate flags are set to False by default. \"\"\" cls.regenerate[kind] =",
"of field names to use. The default csv loader function is a wrapper",
"part of a pytest suite. \"\"\" # Verbose flag verbose = True #",
"reference file is determined by the configuration via set_data_location(). kind Reference kind, used",
"os.path.isabs(path): if kind not in self.reference_data_locations: kind = None if kind in self.reference_data_locations:",
"= default_print_fn # Magic so that an instance of this class can masquerade",
"files are created whenever a text file check fails and a 'preprocess' function",
"use to display information while running tests. The function should have the same",
"will apply to all instances of ReferenceTest subsequently created. The following parameters can",
"specify fields to compare typees. check_order Option to specify fields to compare field",
"is not available. \"\"\" expected_path = self.resolve_reference_path(ref_csv, kind=kind) if self.should_regenerate(kind): self.write_reference_file(actual_path, expected_path) else:",
"optional function that takes a list of strings and preprocesses it in some",
"data from in-memory results. \"\"\" with open(reference_path, 'w') as fout: fout.write(result) if self.verbose",
"like version numbers and timestamps that vary in the output from run to",
"is used, or, if that is not defined, it defaults to /tmp, c:\\temp",
"\"\"\" Internal method to determine if a particular kind of file should be",
"check_data=None, check_types=None, check_order=None, condition=None, sortby=None, precision=None, **kwargs): \"\"\" Check that an in-memory Pandas",
"is None infer_datetime_format is True quotechar is \"\" quoting is csv.QUOTE_MINIMAL escapechar is",
"output. This, obviously, should be used only after careful checking that the new",
"need refer to the whole line. preprocess is an optional function that takes",
"ref_paths, kind=None, lstrip=False, rstrip=False, ignore_substrings=None, ignore_patterns=None, preprocess=None, max_permutation_cases=0): \"\"\" Check that a collection",
"defaults to /tmp, c:\\temp or whatever tempfile.gettempdir() returns, as appropriate. \"\"\" for k",
"data files, then it can't check correctness, so it will raise an exception.",
"\"known to be correct\" reference results. This is typically useful when software produces",
"command is suggested for seeing the differences between the actual output and the",
"-w option. regenerate = {} # Dictionary describing default location for reference data,",
"any of these substrings will be ignored in the comparison. ignore_patterns is an",
"a relative path. \"\"\" if self.reference_data_locations and not os.path.isabs(path): if kind not in",
"self.check_failures(failures, msgs) def assertDatasetCorrect(self, df, ref_csv, actual_path=None, kind='csv', csv_read_fn=None, check_data=None, check_types=None, check_order=None, condition=None,",
"optional list of substrings; lines containing any of these substrings will be ignored"
] |
[
"map mode generates texture coordinates (s,t,r) matching the vertex's eye-space reflection vector. The",
"The reflection map mode is useful for environment mapping without the singularity inherent",
"to provide a more Python-friendly API Overview (from the spec) This extension provides",
"useful for sophisticated cube map texturing-based diffuse lighting models. The official definition of",
"arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets",
"boolean indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension(",
"for sophisticated cube map texturing-based diffuse lighting models. The official definition of this",
"The normal map mode is useful for sophisticated cube map texturing-based diffuse lighting",
"import * from OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME def glInitTexgenReflectionNV(): '''Return boolean indicating whether this",
"constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import _types,",
"normal map mode generates texture coordinates (s,t,r) matching the vertex's transformed eye-space normal.",
"vector. The reflection map mode is useful for environment mapping without the singularity",
"lighting and environment mapping. The reflection map mode generates texture coordinates (s,t,r) matching",
"'''OpenGL extension NV.texgen_reflection This module customises the behaviour of the OpenGL.raw.GL.NV.texgen_reflection to provide",
"Python-friendly API Overview (from the spec) This extension provides two new texture coordinate",
"modes that are useful texture-based lighting and environment mapping. The reflection map mode",
"generation modes that are useful texture-based lighting and environment mapping. The reflection map",
"_glgets from OpenGL.raw.GL.NV.texgen_reflection import * from OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME def glInitTexgenReflectionNV(): '''Return boolean",
"The normal map mode generates texture coordinates (s,t,r) matching the vertex's transformed eye-space",
"OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texgen_reflection",
"wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texgen_reflection import * from",
"extension is available here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt ''' from OpenGL import platform, constant, arrays from",
"available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END AUTOGENERATED SECTION",
"import _EXTENSION_NAME def glInitTexgenReflectionNV(): '''Return boolean indicating whether this extension is available''' from",
"this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ###",
"vertex's transformed eye-space normal. The normal map mode is useful for sophisticated cube",
"new texture coordinate generation modes that are useful texture-based lighting and environment mapping.",
"official definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt ''' from OpenGL import",
"generates texture coordinates (s,t,r) matching the vertex's transformed eye-space normal. The normal map",
"indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME",
"mode generates texture coordinates (s,t,r) matching the vertex's eye-space reflection vector. The reflection",
"in sphere mapping. The normal map mode generates texture coordinates (s,t,r) matching the",
"import _types, _glgets from OpenGL.raw.GL.NV.texgen_reflection import * from OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME def glInitTexgenReflectionNV():",
"reflection map mode generates texture coordinates (s,t,r) matching the vertex's eye-space reflection vector.",
"map mode generates texture coordinates (s,t,r) matching the vertex's transformed eye-space normal. The",
"environment mapping without the singularity inherent in sphere mapping. The normal map mode",
"mapping. The reflection map mode generates texture coordinates (s,t,r) matching the vertex's eye-space",
"eye-space reflection vector. The reflection map mode is useful for environment mapping without",
"customises the behaviour of the OpenGL.raw.GL.NV.texgen_reflection to provide a more Python-friendly API Overview",
"texturing-based diffuse lighting models. The official definition of this extension is available here:",
"is useful for environment mapping without the singularity inherent in sphere mapping. The",
"* from OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME def glInitTexgenReflectionNV(): '''Return boolean indicating whether this extension",
"the spec) This extension provides two new texture coordinate generation modes that are",
"reflection map mode is useful for environment mapping without the singularity inherent in",
"cube map texturing-based diffuse lighting models. The official definition of this extension is",
"_EXTENSION_NAME def glInitTexgenReflectionNV(): '''Return boolean indicating whether this extension is available''' from OpenGL",
"from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from",
"mode is useful for sophisticated cube map texturing-based diffuse lighting models. The official",
"mapping. The normal map mode generates texture coordinates (s,t,r) matching the vertex's transformed",
"eye-space normal. The normal map mode is useful for sophisticated cube map texturing-based",
"ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texgen_reflection import * from OpenGL.raw.GL.NV.texgen_reflection import",
"module customises the behaviour of the OpenGL.raw.GL.NV.texgen_reflection to provide a more Python-friendly API",
"This extension provides two new texture coordinate generation modes that are useful texture-based",
"matching the vertex's transformed eye-space normal. The normal map mode is useful for",
"of the OpenGL.raw.GL.NV.texgen_reflection to provide a more Python-friendly API Overview (from the spec)",
"extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END",
"of this extension is available here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt ''' from OpenGL import platform, constant,",
"the vertex's eye-space reflection vector. The reflection map mode is useful for environment",
"a more Python-friendly API Overview (from the spec) This extension provides two new",
"singularity inherent in sphere mapping. The normal map mode generates texture coordinates (s,t,r)",
"(s,t,r) matching the vertex's transformed eye-space normal. The normal map mode is useful",
"mode is useful for environment mapping without the singularity inherent in sphere mapping.",
"whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME )",
"texture-based lighting and environment mapping. The reflection map mode generates texture coordinates (s,t,r)",
"the behaviour of the OpenGL.raw.GL.NV.texgen_reflection to provide a more Python-friendly API Overview (from",
"more Python-friendly API Overview (from the spec) This extension provides two new texture",
"normal. The normal map mode is useful for sophisticated cube map texturing-based diffuse",
"models. The official definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt ''' from",
"useful for environment mapping without the singularity inherent in sphere mapping. The normal",
"The reflection map mode generates texture coordinates (s,t,r) matching the vertex's eye-space reflection",
"OpenGL.raw.GL.NV.texgen_reflection import * from OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME def glInitTexgenReflectionNV(): '''Return boolean indicating whether",
"(s,t,r) matching the vertex's eye-space reflection vector. The reflection map mode is useful",
"extension NV.texgen_reflection This module customises the behaviour of the OpenGL.raw.GL.NV.texgen_reflection to provide a",
"lighting models. The official definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt '''",
"OpenGL.raw.GL.NV.texgen_reflection to provide a more Python-friendly API Overview (from the spec) This extension",
"texture coordinates (s,t,r) matching the vertex's transformed eye-space normal. The normal map mode",
"definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt ''' from OpenGL import platform,",
"OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from",
"sophisticated cube map texturing-based diffuse lighting models. The official definition of this extension",
"available here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt ''' from OpenGL import platform, constant, arrays from OpenGL import",
"inherent in sphere mapping. The normal map mode generates texture coordinates (s,t,r) matching",
"import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texgen_reflection import * from OpenGL.raw.GL.NV.texgen_reflection",
"coordinates (s,t,r) matching the vertex's transformed eye-space normal. The normal map mode is",
"'''Return boolean indicating whether this extension is available''' from OpenGL import extensions return",
"vertex's eye-space reflection vector. The reflection map mode is useful for environment mapping",
"import extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texgen_reflection import",
"useful texture-based lighting and environment mapping. The reflection map mode generates texture coordinates",
"texture coordinate generation modes that are useful texture-based lighting and environment mapping. The",
"API Overview (from the spec) This extension provides two new texture coordinate generation",
"''' from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import",
"extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texgen_reflection import *",
"the singularity inherent in sphere mapping. The normal map mode generates texture coordinates",
"coordinate generation modes that are useful texture-based lighting and environment mapping. The reflection",
"OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME def glInitTexgenReflectionNV(): '''Return boolean indicating whether this extension is available'''",
"coordinates (s,t,r) matching the vertex's eye-space reflection vector. The reflection map mode is",
"map mode is useful for environment mapping without the singularity inherent in sphere",
"and environment mapping. The reflection map mode generates texture coordinates (s,t,r) matching the",
"NV.texgen_reflection This module customises the behaviour of the OpenGL.raw.GL.NV.texgen_reflection to provide a more",
"environment mapping. The reflection map mode generates texture coordinates (s,t,r) matching the vertex's",
"This module customises the behaviour of the OpenGL.raw.GL.NV.texgen_reflection to provide a more Python-friendly",
"from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes",
"mapping without the singularity inherent in sphere mapping. The normal map mode generates",
"import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL",
"without the singularity inherent in sphere mapping. The normal map mode generates texture",
"the OpenGL.raw.GL.NV.texgen_reflection to provide a more Python-friendly API Overview (from the spec) This",
"matching the vertex's eye-space reflection vector. The reflection map mode is useful for",
"provides two new texture coordinate generation modes that are useful texture-based lighting and",
"transformed eye-space normal. The normal map mode is useful for sophisticated cube map",
"sphere mapping. The normal map mode generates texture coordinates (s,t,r) matching the vertex's",
"texture coordinates (s,t,r) matching the vertex's eye-space reflection vector. The reflection map mode",
"for environment mapping without the singularity inherent in sphere mapping. The normal map",
"extension provides two new texture coordinate generation modes that are useful texture-based lighting",
"generates texture coordinates (s,t,r) matching the vertex's eye-space reflection vector. The reflection map",
"two new texture coordinate generation modes that are useful texture-based lighting and environment",
"diffuse lighting models. The official definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt",
"map mode is useful for sophisticated cube map texturing-based diffuse lighting models. The",
"from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texgen_reflection import * from OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME",
"here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt ''' from OpenGL import platform, constant, arrays from OpenGL import extensions,",
"that are useful texture-based lighting and environment mapping. The reflection map mode generates",
"provide a more Python-friendly API Overview (from the spec) This extension provides two",
"is useful for sophisticated cube map texturing-based diffuse lighting models. The official definition",
"this extension is available here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt ''' from OpenGL import platform, constant, arrays",
"_types, _glgets from OpenGL.raw.GL.NV.texgen_reflection import * from OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME def glInitTexgenReflectionNV(): '''Return",
"map texturing-based diffuse lighting models. The official definition of this extension is available",
"is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END AUTOGENERATED",
"OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texgen_reflection import * from OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME def",
"glInitTexgenReflectionNV(): '''Return boolean indicating whether this extension is available''' from OpenGL import extensions",
"spec) This extension provides two new texture coordinate generation modes that are useful",
"the vertex's transformed eye-space normal. The normal map mode is useful for sophisticated",
"platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import",
"from OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME def glInitTexgenReflectionNV(): '''Return boolean indicating whether this extension is",
"http://www.opengl.org/registry/specs/NV/texgen_reflection.txt ''' from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper",
"def glInitTexgenReflectionNV(): '''Return boolean indicating whether this extension is available''' from OpenGL import",
"behaviour of the OpenGL.raw.GL.NV.texgen_reflection to provide a more Python-friendly API Overview (from the",
"is available here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt ''' from OpenGL import platform, constant, arrays from OpenGL",
"reflection vector. The reflection map mode is useful for environment mapping without the",
"The official definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texgen_reflection.txt ''' from OpenGL",
"from OpenGL.raw.GL.NV.texgen_reflection import * from OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME def glInitTexgenReflectionNV(): '''Return boolean indicating",
"<gh_stars>0 '''OpenGL extension NV.texgen_reflection This module customises the behaviour of the OpenGL.raw.GL.NV.texgen_reflection to",
"are useful texture-based lighting and environment mapping. The reflection map mode generates texture",
"Overview (from the spec) This extension provides two new texture coordinate generation modes",
"(from the spec) This extension provides two new texture coordinate generation modes that",
"mode generates texture coordinates (s,t,r) matching the vertex's transformed eye-space normal. The normal",
"normal map mode is useful for sophisticated cube map texturing-based diffuse lighting models."
] |
[
"os.path.join(path_to_datasets, dir_name) for file_name in os.listdir(data_dir): name, ext = os.path.splitext(file_name) if ext in",
"extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS): data_dir = os.path.join(path_to_datasets, dir_name) for file_name in os.listdir(data_dir): name, ext =",
"os PATH_TO_DATASETS = './mlpractice/datasets/' class DataSet(object): def __init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS): data_dir =",
"data_dir = os.path.join(path_to_datasets, dir_name) for file_name in os.listdir(data_dir): name, ext = os.path.splitext(file_name) if",
"ext in extensions: data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name)) setattr(self, name, data) def load_iris(): return",
"os.listdir(data_dir): name, ext = os.path.splitext(file_name) if ext in extensions: data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name))",
"__init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS): data_dir = os.path.join(path_to_datasets, dir_name) for file_name in os.listdir(data_dir): name,",
"file_name in os.listdir(data_dir): name, ext = os.path.splitext(file_name) if ext in extensions: data =",
"import os PATH_TO_DATASETS = './mlpractice/datasets/' class DataSet(object): def __init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS): data_dir",
"= pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name)) setattr(self, name, data) def load_iris(): return DataSet(dir_name='iris/') def load_movieLens(): return",
"= './mlpractice/datasets/' class DataSet(object): def __init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS): data_dir = os.path.join(path_to_datasets, dir_name)",
"import pandas import os PATH_TO_DATASETS = './mlpractice/datasets/' class DataSet(object): def __init__(self, dir_name, extensions=['.csv'],",
"if ext in extensions: data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name)) setattr(self, name, data) def load_iris():",
"name, ext = os.path.splitext(file_name) if ext in extensions: data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name)) setattr(self,",
"DataSet(object): def __init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS): data_dir = os.path.join(path_to_datasets, dir_name) for file_name in",
"path_to_datasets=PATH_TO_DATASETS): data_dir = os.path.join(path_to_datasets, dir_name) for file_name in os.listdir(data_dir): name, ext = os.path.splitext(file_name)",
"for file_name in os.listdir(data_dir): name, ext = os.path.splitext(file_name) if ext in extensions: data",
"'./mlpractice/datasets/' class DataSet(object): def __init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS): data_dir = os.path.join(path_to_datasets, dir_name) for",
"= os.path.join(path_to_datasets, dir_name) for file_name in os.listdir(data_dir): name, ext = os.path.splitext(file_name) if ext",
"pandas import os PATH_TO_DATASETS = './mlpractice/datasets/' class DataSet(object): def __init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS):",
"extensions: data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name)) setattr(self, name, data) def load_iris(): return DataSet(dir_name='iris/') def",
"dir_name) for file_name in os.listdir(data_dir): name, ext = os.path.splitext(file_name) if ext in extensions:",
"= os.path.splitext(file_name) if ext in extensions: data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name)) setattr(self, name, data)",
"in extensions: data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name)) setattr(self, name, data) def load_iris(): return DataSet(dir_name='iris/')",
"pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name)) setattr(self, name, data) def load_iris(): return DataSet(dir_name='iris/') def load_movieLens(): return DataSet(dir_name='ml-latest-small/')",
"dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS): data_dir = os.path.join(path_to_datasets, dir_name) for file_name in os.listdir(data_dir): name, ext",
"in os.listdir(data_dir): name, ext = os.path.splitext(file_name) if ext in extensions: data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir,",
"data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name)) setattr(self, name, data) def load_iris(): return DataSet(dir_name='iris/') def load_movieLens():",
"PATH_TO_DATASETS = './mlpractice/datasets/' class DataSet(object): def __init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS): data_dir = os.path.join(path_to_datasets,",
"def __init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS): data_dir = os.path.join(path_to_datasets, dir_name) for file_name in os.listdir(data_dir):",
"class DataSet(object): def __init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS): data_dir = os.path.join(path_to_datasets, dir_name) for file_name",
"os.path.splitext(file_name) if ext in extensions: data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name)) setattr(self, name, data) def",
"ext = os.path.splitext(file_name) if ext in extensions: data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name)) setattr(self, name,"
] |
[
"QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample)",
"Created by: PyQt5 UI code generator 5.9.2 # # WARNING! All changes made",
"1, 1) self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1, 1) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)",
"def retranslateUi(self, resamplingDialog): _translate = QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie - Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling options:\"))",
"this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_resamplingDialog(object):",
"1, 1) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1)",
"code generator 5.9.2 # # WARNING! All changes made in this file will",
"self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0))",
"self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\")",
"QtCore, QtGui, QtWidgets class Ui_resamplingDialog(object): def setupUi(self, resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406, 540) self.gridLayout =",
"0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample)",
"file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_resamplingDialog(object): def",
"QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue) self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading) self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0)",
"1, 0, 1, 1) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 2, 0,",
"QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0, 0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0,",
"1) self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300,",
"self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def retranslateUi(self, resamplingDialog): _translate = QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie - Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\",",
"1, 1) self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching)",
"# # Created by: PyQt5 UI code generator 5.9.2 # # WARNING! All",
"# WARNING! All changes made in this file will be lost! from PyQt5",
"QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog)",
"QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1, 1) self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1,",
"1) self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1, 1) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem,",
"QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 2, 0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1) self.horizontalLayout",
"1, 1) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 2, 0, 1, 1)",
"QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def retranslateUi(self, resamplingDialog): _translate = QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie - Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling",
"self.gridLayout = QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea = QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0, 0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True)",
"self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading) self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate)",
"2, 0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1) self.horizontalLayout = QtWidgets.QHBoxLayout()",
"self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1, 1) self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\")",
"self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1, 1) self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\")",
"QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie - Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current rate:\")) self.labelNewRateHeading.setText(_translate(\"resamplingDialog\", \"Resample",
"All changes made in this file will be lost! from PyQt5 import QtCore,",
"\"Meggie - Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current rate:\")) self.labelNewRateHeading.setText(_translate(\"resamplingDialog\", \"Resample to:\")) self.groupBoxBatching.setTitle(_translate(\"resamplingDialog\",",
"self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2",
"# Created by: PyQt5 UI code generator 5.9.2 # # WARNING! All changes",
"self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1, 1) self.groupBoxBatching =",
"self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1, 1) self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1, 1) spacerItem = QtWidgets.QSpacerItem(20,",
"0, 1, 1) self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1, 1) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum,",
"QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample",
"self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1 = QtWidgets.QSpacerItem(40,",
"0, 1, 1) self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder =",
"self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\")",
"0, 1, 1) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)",
"spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch",
"- Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current rate:\")) self.labelNewRateHeading.setText(_translate(\"resamplingDialog\", \"Resample to:\")) self.groupBoxBatching.setTitle(_translate(\"resamplingDialog\", \"Batching\"))",
"1) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 2, 0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents)",
"PyQt5 import QtCore, QtGui, QtWidgets class Ui_resamplingDialog(object): def setupUi(self, resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406, 540)",
"self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue) self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1,",
"self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog)",
"changes made in this file will be lost! from PyQt5 import QtCore, QtGui,",
"QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch)",
"\"Resampling options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current rate:\")) self.labelNewRateHeading.setText(_translate(\"resamplingDialog\", \"Resample to:\")) self.groupBoxBatching.setTitle(_translate(\"resamplingDialog\", \"Batching\")) self.pushButtonCancel.setText(_translate(\"resamplingDialog\", \"Cancel\")) self.pushButtonBatch.setText(_translate(\"resamplingDialog\",",
"QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading) self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1,",
"generator 5.9.2 # # WARNING! All changes made in this file will be",
"0, 1, 1) self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def retranslateUi(self, resamplingDialog): _translate =",
"resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie - Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current rate:\")) self.labelNewRateHeading.setText(_translate(\"resamplingDialog\", \"Resample to:\"))",
"QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 2, 0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1)",
"0, 1, 1) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 2, 0, 1,",
"= QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0, 0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0,",
"Ui_resamplingDialog(object): def setupUi(self, resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406, 540) self.gridLayout = QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea =",
"self.labelCurrentRateValue) self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading) self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\",",
"self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1, 1) self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching)",
"= QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1, 1) self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0,",
"540) self.gridLayout = QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea = QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0, 0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)",
"-*- coding: utf-8 -*- # Form implementation generated from reading ui file 'resamplingDialogUi.ui'",
"QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading) self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample,",
"self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading) self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue) self.labelNewRateHeading =",
"reading ui file 'resamplingDialogUi.ui' # # Created by: PyQt5 UI code generator 5.9.2",
"QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading) self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue)",
"300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1, 1) self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1, 1) spacerItem",
"self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea = QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0, 0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents =",
"self.labelCurrentRateHeading) self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue) self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\")",
"# Form implementation generated from reading ui file 'resamplingDialogUi.ui' # # Created by:",
"UI code generator 5.9.2 # # WARNING! All changes made in this file",
"lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_resamplingDialog(object): def setupUi(self, resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\")",
"PyQt5 UI code generator 5.9.2 # # WARNING! All changes made in this",
"self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading) self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole,",
"= QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch =",
"WARNING! All changes made in this file will be lost! from PyQt5 import",
"self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1, 1) self.gridLayout_2.addWidget(self.groupBoxBatching,",
"self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue) self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading) self.doubleSpinBoxNewRate",
"\"Current rate:\")) self.labelNewRateHeading.setText(_translate(\"resamplingDialog\", \"Resample to:\")) self.groupBoxBatching.setTitle(_translate(\"resamplingDialog\", \"Batching\")) self.pushButtonCancel.setText(_translate(\"resamplingDialog\", \"Cancel\")) self.pushButtonBatch.setText(_translate(\"resamplingDialog\", \"Batch\")) self.pushButtonApply.setText(_translate(\"resamplingDialog\", \"Apply\"))",
"386, 489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\")",
"= QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea = QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0, 0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\")",
"= QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie - Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current rate:\")) self.labelNewRateHeading.setText(_translate(\"resamplingDialog\",",
"in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class",
"setupUi(self, resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406, 540) self.gridLayout = QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea = QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0,",
"self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1) self.retranslateUi(resamplingDialog)",
"QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1, 1)",
"retranslateUi(self, resamplingDialog): _translate = QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie - Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\",",
"self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1, 1) self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1, 1)",
"0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489))",
"Form implementation generated from reading ui file 'resamplingDialogUi.ui' # # Created by: PyQt5",
"489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout",
"resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406, 540) self.gridLayout = QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea = QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0, 0))",
"self.scrollArea = QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0, 0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents = QtWidgets.QWidget()",
"QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading) self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue) self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample)",
"1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1 =",
"self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def retranslateUi(self, resamplingDialog): _translate = QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie - Resampling\"))",
"self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0,",
"options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current rate:\")) self.labelNewRateHeading.setText(_translate(\"resamplingDialog\", \"Resample to:\")) self.groupBoxBatching.setTitle(_translate(\"resamplingDialog\", \"Batching\")) self.pushButtonCancel.setText(_translate(\"resamplingDialog\", \"Cancel\")) self.pushButtonBatch.setText(_translate(\"resamplingDialog\", \"Batch\"))",
"# -*- coding: utf-8 -*- # Form implementation generated from reading ui file",
"self.gridLayout_2.addItem(spacerItem, 2, 0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1) self.horizontalLayout =",
"QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading)",
"self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout,",
"1) self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def retranslateUi(self, resamplingDialog): _translate = QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\",",
"self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1) self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def",
"self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1, 1) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 2,",
"self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\")",
"implementation generated from reading ui file 'resamplingDialogUi.ui' # # Created by: PyQt5 UI",
"= QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1,",
"= QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog)",
"QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\")",
"self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample =",
"self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def retranslateUi(self, resamplingDialog): _translate = QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie -",
"self.scrollArea.setMinimumSize(QtCore.QSize(0, 0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386,",
"from reading ui file 'resamplingDialogUi.ui' # # Created by: PyQt5 UI code generator",
"# # WARNING! All changes made in this file will be lost! from",
"self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading) self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0)",
"self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading) self.labelCurrentRateValue",
"ui file 'resamplingDialogUi.ui' # # Created by: PyQt5 UI code generator 5.9.2 #",
"self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel)",
"QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1, 1)",
"self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2 =",
"self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1 = QtWidgets.QSpacerItem(40, 20,",
"= QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 2, 0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0,",
"QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea = QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0, 0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents",
"generated from reading ui file 'resamplingDialogUi.ui' # # Created by: PyQt5 UI code",
"self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1) self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def retranslateUi(self, resamplingDialog):",
"self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1) self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject)",
"1, 1) self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def retranslateUi(self, resamplingDialog): _translate = QtCore.QCoreApplication.translate",
"QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1) self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog)",
"self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout, 1,",
"file 'resamplingDialogUi.ui' # # Created by: PyQt5 UI code generator 5.9.2 # #",
"= QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole,",
"self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading) self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue) self.labelNewRateHeading",
"class Ui_resamplingDialog(object): def setupUi(self, resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406, 540) self.gridLayout = QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea",
"= QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\")",
"1, 0, 1, 1) self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def retranslateUi(self, resamplingDialog): _translate",
"= QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1) self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch)",
"resamplingDialog): _translate = QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie - Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current",
"5.9.2 # # WARNING! All changes made in this file will be lost!",
"QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 2, 0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0,",
"self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current rate:\")) self.labelNewRateHeading.setText(_translate(\"resamplingDialog\", \"Resample to:\")) self.groupBoxBatching.setTitle(_translate(\"resamplingDialog\", \"Batching\")) self.pushButtonCancel.setText(_translate(\"resamplingDialog\", \"Cancel\"))",
"by: PyQt5 UI code generator 5.9.2 # # WARNING! All changes made in",
"self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1, 1) self.groupBoxBatching",
"self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading) self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0,",
"QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1, 1) self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching =",
"self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel =",
"self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300))",
"Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current rate:\")) self.labelNewRateHeading.setText(_translate(\"resamplingDialog\", \"Resample to:\")) self.groupBoxBatching.setTitle(_translate(\"resamplingDialog\", \"Batching\")) self.pushButtonCancel.setText(_translate(\"resamplingDialog\",",
"40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 2, 0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0, 1,",
"0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\")",
"= QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\")",
"self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout =",
"0, 0, 1, 1) self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1, 1) spacerItem = QtWidgets.QSpacerItem(20, 40,",
"20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\")",
"self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def retranslateUi(self, resamplingDialog): _translate = QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie",
"self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1, 1) self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1, 1) spacerItem =",
"1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1",
"-*- # Form implementation generated from reading ui file 'resamplingDialogUi.ui' # # Created",
"self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName(\"scrollArea\") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0,",
"self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue) self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading) self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample)",
"100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1, 1) self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)",
"QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder,",
"self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0,",
"QtGui, QtWidgets class Ui_resamplingDialog(object): def setupUi(self, resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406, 540) self.gridLayout = QtWidgets.QGridLayout(resamplingDialog)",
"self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1, 1) self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching",
"= QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1,",
"QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)",
"self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1) self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept)",
"= QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading) self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\")",
"= QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\")",
"self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply =",
"QtWidgets class Ui_resamplingDialog(object): def setupUi(self, resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406, 540) self.gridLayout = QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\")",
"0, 386, 489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)",
"= QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1,",
"def setupUi(self, resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406, 540) self.gridLayout = QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea = QtWidgets.QScrollArea(resamplingDialog)",
"resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406, 540) self.gridLayout = QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea = QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0, 0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)",
"from PyQt5 import QtCore, QtGui, QtWidgets class Ui_resamplingDialog(object): def setupUi(self, resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406,",
"resamplingDialog.resize(406, 540) self.gridLayout = QtWidgets.QGridLayout(resamplingDialog) self.gridLayout.setObjectName(\"gridLayout\") self.scrollArea = QtWidgets.QScrollArea(resamplingDialog) self.scrollArea.setMinimumSize(QtCore.QSize(0, 0)) self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)",
"QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply)",
"self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1) self.retranslateUi(resamplingDialog) self.pushButtonCancel.clicked.connect(resamplingDialog.reject) self.pushButtonApply.clicked.connect(resamplingDialog.accept) self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch) QtCore.QMetaObject.connectSlotsByName(resamplingDialog) def retranslateUi(self,",
"= QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading) self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole,",
"self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading) self.labelCurrentRateValue =",
"self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0,",
"import QtCore, QtGui, QtWidgets class Ui_resamplingDialog(object): def setupUi(self, resamplingDialog): resamplingDialog.setObjectName(\"resamplingDialog\") resamplingDialog.resize(406, 540) self.gridLayout",
"self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading",
"'resamplingDialogUi.ui' # # Created by: PyQt5 UI code generator 5.9.2 # # WARNING!",
"= QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxResample.setObjectName(\"groupBoxResample\") self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading =",
"= QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue) self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole,",
"QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading) self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\")",
"coding: utf-8 -*- # Form implementation generated from reading ui file 'resamplingDialogUi.ui' #",
"self.labelNewRateHeading) self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample, 0,",
"0, 0, 1, 1) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding,",
"1) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel",
"self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current rate:\")) self.labelNewRateHeading.setText(_translate(\"resamplingDialog\", \"Resample to:\")) self.groupBoxBatching.setTitle(_translate(\"resamplingDialog\", \"Batching\")) self.pushButtonCancel.setText(_translate(\"resamplingDialog\", \"Cancel\")) self.pushButtonBatch.setText(_translate(\"resamplingDialog\", \"Batch\")) self.pushButtonApply.setText(_translate(\"resamplingDialog\",",
"will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_resamplingDialog(object): def setupUi(self,",
"_translate = QtCore.QCoreApplication.translate resamplingDialog.setWindowTitle(_translate(\"resamplingDialog\", \"Meggie - Resampling\")) self.groupBoxResample.setTitle(_translate(\"resamplingDialog\", \"Resampling options:\")) self.labelCurrentRateHeading.setText(_translate(\"resamplingDialog\", \"Current rate:\"))",
"self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading) self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\")",
"self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog) self.pushButtonApply.setObjectName(\"pushButtonApply\") self.horizontalLayout.addWidget(self.pushButtonApply) self.gridLayout.addLayout(self.horizontalLayout, 1, 0,",
"utf-8 -*- # Form implementation generated from reading ui file 'resamplingDialogUi.ui' # #",
"QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateValue.setText(\"\") self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue) self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading)",
"self.labelCurrentRateValue.setObjectName(\"labelCurrentRateValue\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue) self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelNewRateHeading.setObjectName(\"labelNewRateHeading\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading) self.doubleSpinBoxNewRate =",
"made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets",
"QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog) self.pushButtonCancel.setObjectName(\"pushButtonCancel\") self.horizontalLayout.addWidget(self.pushButtonCancel) self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog) self.pushButtonBatch.setObjectName(\"pushButtonBatch\") self.horizontalLayout.addWidget(self.pushButtonBatch) self.pushButtonApply",
"self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample) self.doubleSpinBoxNewRate.setMaximum(10000.0) self.doubleSpinBoxNewRate.setProperty(\"value\", 100.0) self.doubleSpinBoxNewRate.setObjectName(\"doubleSpinBoxNewRate\") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate) self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0,",
"0, 0, 1, 1) self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) self.groupBoxBatching.setObjectName(\"groupBoxBatching\") self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching) self.gridLayoutBatching.setObjectName(\"gridLayoutBatching\") self.batchingWidgetPlaceholder",
"= QtWidgets.QFormLayout(self.groupBoxResample) self.formLayout.setObjectName(\"formLayout\") self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample) self.labelCurrentRateHeading.setObjectName(\"labelCurrentRateHeading\") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading) self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample)",
"self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489)) self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0)) self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\") self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)",
"spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 2, 0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea,",
"be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_resamplingDialog(object): def setupUi(self, resamplingDialog):",
"self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching) self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300)) self.batchingWidgetPlaceholder.setObjectName(\"batchingWidgetPlaceholder\") self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1, 1) self.gridLayout_2.addWidget(self.groupBoxBatching, 1,"
] |
[
"x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.float32)) expect =",
"def test_relu_float32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.float32))",
"1], [10, 1, -1]]]]).astype(np.int32)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10,",
"1, 0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu_dynamic = NetReluDynamic() output = relu_dynamic(x) assert (output.asnumpy() ==",
"KIND, either express or implied. # See the License for the specific language",
"Unless required by applicable law or agreed to in writing, software # distributed",
"1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu()",
"== expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int8(): x = Tensor(np.array([[[[-1, 1, 10], [1,",
"operations as P from mindspore.ops.operations import _inner_ops as inner class NetRelu(nn.Cell): def __init__(self):",
"test_relu_int32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int32)) expect",
"governing permissions and # limitations under the License. # ============================================================================ import numpy as",
"NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu()",
"1, 0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) print(output.asnumpy(), expect) assert",
"= np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu",
"mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.ops",
"this file except in compliance with the License. # You may obtain a",
"(output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int8(): x = Tensor(np.array([[[[-1, 1, 10],",
"np import pytest import mindspore.context as context import mindspore.nn as nn from mindspore",
"[10, 1, -1]]]]).astype(np.int32)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1,",
"relu(x) print(output.asnumpy(), expect) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu() output",
"Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0",
"ANY KIND, either express or implied. # See the License for the specific",
"test_relu_float32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.float32)) expect",
"NetRelu(nn.Cell): def __init__(self): super(NetRelu, self).__init__() self.relu = P.ReLU() def construct(self, x): return self.relu(x)",
"0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) print(output.asnumpy(), expect) assert (output.asnumpy()",
"from mindspore.ops import operations as P from mindspore.ops.operations import _inner_ops as inner class",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"language governing permissions and # limitations under the License. # ============================================================================ import numpy",
"as P from mindspore.ops.operations import _inner_ops as inner class NetRelu(nn.Cell): def __init__(self): super(NetRelu,",
"= self.conv(x) return self.relu(x_conv) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_float32(): x = Tensor(np.array([[[[-1, 1,",
"def test_relu_int64_dynamic_shape(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int64))",
"[1, 0, 1,], [10, 1, 0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output =",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"= relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64_dynamic_shape(): x =",
"from mindspore import Tensor from mindspore.ops import operations as P from mindspore.ops.operations import",
"device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")",
"(output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy()",
"OF ANY KIND, either express or implied. # See the License for the",
"pytest import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor",
"relu = NetRelu() output = relu(x) print(output.asnumpy(), expect) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE,",
"[1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output =",
"expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")",
"self.relu = P.ReLU() def construct(self, x): x_conv = self.conv(x) return self.relu(x_conv) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training",
"def __init__(self): super(NetRelu, self).__init__() self.relu = P.ReLU() def construct(self, x): return self.relu(x) class",
"np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu =",
"x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int64)) expect =",
"1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int32)) expect = np.array([[[[0, 1, 10,],",
"Co., Ltd # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"import numpy as np import pytest import mindspore.context as context import mindspore.nn as",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from",
"[10, 1, 0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy()",
"Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int32)) expect = np.array([[[[0, 1,",
"expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")",
"relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int8(): x = Tensor(np.array([[[[-1,",
"10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int32)) expect = np.array([[[[0, 1, 10,], [1,",
"under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64():",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int8(): x",
"def test_relu_int8(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int8))",
"required by applicable law or agreed to in writing, software # distributed under",
"1, 0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() ==",
"applicable law or agreed to in writing, software # distributed under the License",
"mindspore.nn as nn from mindspore import Tensor from mindspore.ops import operations as P",
"@pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int8(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1],",
"[10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu_dynamic = NetReluDynamic() output = relu_dynamic(x) assert (output.asnumpy()",
"or agreed to in writing, software # distributed under the License is distributed",
"self).__init__() self.conv = inner.GpuConvertToDynamicShape() self.relu = P.ReLU() def construct(self, x): x_conv = self.conv(x)",
"CONDITIONS OF ANY KIND, either express or implied. # See the License for",
"x_conv = self.conv(x) return self.relu(x_conv) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_float32(): x = Tensor(np.array([[[[-1,",
"= NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def",
"0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x)",
"2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version",
"1,], [10, 1, 0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert",
"relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int32(): x = Tensor(np.array([[[[-1,",
"10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output",
"-1, 1], [10, 1, -1]]]]).astype(np.int32)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,],",
"under the Apache License, Version 2.0 (the \"License\"); # you may not use",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1,",
"License. # You may obtain a copy of the License at # #",
"compliance with the License. # You may obtain a copy of the License",
"== expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64(): x = Tensor(np.array([[[[-1, 1, 10], [1,",
"inner.GpuConvertToDynamicShape() self.relu = P.ReLU() def construct(self, x): x_conv = self.conv(x) return self.relu(x_conv) @pytest.mark.level0",
"10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int64)) expect = np.array([[[[0, 1, 10,], [1,",
"= np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu_dynamic",
"1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu()",
"relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64_dynamic_shape(): x = Tensor(np.array([[[[-1,",
"1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu_dynamic = NetReluDynamic() output = relu_dynamic(x) assert",
"@pytest.mark.env_onecard def test_relu_int32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1,",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int8)) expect = np.array([[[[0, 1, 10,], [1,",
"output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64(): x",
"def test_relu_int64(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int64))",
"# ============================================================================ import numpy as np import pytest import mindspore.context as context import",
"self.relu(x) class NetReluDynamic(nn.Cell): def __init__(self): super(NetReluDynamic, self).__init__() self.conv = inner.GpuConvertToDynamicShape() self.relu = P.ReLU()",
"context import mindspore.nn as nn from mindspore import Tensor from mindspore.ops import operations",
"not use this file except in compliance with the License. # You may",
"1, -1]]]]).astype(np.int8)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int8)",
"@pytest.mark.env_onecard def test_relu_int8(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1,",
"License, Version 2.0 (the \"License\"); # you may not use this file except",
"P.ReLU() def construct(self, x): return self.relu(x) class NetReluDynamic(nn.Cell): def __init__(self): super(NetReluDynamic, self).__init__() self.conv",
"[1, -1, 1], [10, 1, -1]]]]).astype(np.int64)) expect = np.array([[[[0, 1, 10,], [1, 0,",
"relu = NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard",
"0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() == expect).all()",
"0, 1,], [10, 1, 0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x)",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"import _inner_ops as inner class NetRelu(nn.Cell): def __init__(self): super(NetRelu, self).__init__() self.relu = P.ReLU()",
"def __init__(self): super(NetReluDynamic, self).__init__() self.conv = inner.GpuConvertToDynamicShape() self.relu = P.ReLU() def construct(self, x):",
"return self.relu(x_conv) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_float32(): x = Tensor(np.array([[[[-1, 1, 10], [1,",
"as inner class NetRelu(nn.Cell): def __init__(self): super(NetRelu, self).__init__() self.relu = P.ReLU() def construct(self,",
"from mindspore.ops.operations import _inner_ops as inner class NetRelu(nn.Cell): def __init__(self): super(NetRelu, self).__init__() self.relu",
"@pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64_dynamic_shape(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1],",
"np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu =",
"1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu()",
"# you may not use this file except in compliance with the License.",
"construct(self, x): x_conv = self.conv(x) return self.relu(x_conv) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_float32(): x",
"[1, 0, 1,], [10, 1, 0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output =",
"def test_relu_int32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int32))",
"agreed to in writing, software # distributed under the License is distributed on",
"1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu_dynamic = NetReluDynamic()",
"(the \"License\"); # you may not use this file except in compliance with",
"1, -1]]]]).astype(np.int32)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int32)",
"self.conv = inner.GpuConvertToDynamicShape() self.relu = P.ReLU() def construct(self, x): x_conv = self.conv(x) return",
"@pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10,",
"self.relu(x_conv) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_float32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1,",
"expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int8(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1,",
"1, 0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() ==",
"# Unless required by applicable law or agreed to in writing, software #",
"assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int8(): x = Tensor(np.array([[[[-1, 1,",
"[1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu_dynamic = NetReluDynamic() output =",
"mindspore import Tensor from mindspore.ops import operations as P from mindspore.ops.operations import _inner_ops",
"by applicable law or agreed to in writing, software # distributed under the",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"nn from mindspore import Tensor from mindspore.ops import operations as P from mindspore.ops.operations",
"P from mindspore.ops.operations import _inner_ops as inner class NetRelu(nn.Cell): def __init__(self): super(NetRelu, self).__init__()",
"device_target=\"GPU\") relu = NetRelu() output = relu(x) print(output.asnumpy(), expect) assert (output.asnumpy() == expect).all()",
"= np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu",
"-1]]]]).astype(np.int32)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE,",
"file except in compliance with the License. # You may obtain a copy",
"= P.ReLU() def construct(self, x): x_conv = self.conv(x) return self.relu(x_conv) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard",
"assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64_dynamic_shape(): x = Tensor(np.array([[[[-1, 1,",
"1], [10, 1, -1]]]]).astype(np.float32)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10,",
"License for the specific language governing permissions and # limitations under the License.",
"1, -1]]]]).astype(np.float32)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.float32)",
"expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")",
"to in writing, software # distributed under the License is distributed on an",
"and # limitations under the License. # ============================================================================ import numpy as np import",
"implied. # See the License for the specific language governing permissions and #",
"the License. # ============================================================================ import numpy as np import pytest import mindspore.context as",
"\"License\"); # you may not use this file except in compliance with the",
"[10, 1, 0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy()",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() == expect).all()",
"10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output",
"= P.ReLU() def construct(self, x): return self.relu(x) class NetReluDynamic(nn.Cell): def __init__(self): super(NetReluDynamic, self).__init__()",
"[10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) print(output.asnumpy(), expect)",
"import mindspore.nn as nn from mindspore import Tensor from mindspore.ops import operations as",
"inner class NetRelu(nn.Cell): def __init__(self): super(NetRelu, self).__init__() self.relu = P.ReLU() def construct(self, x):",
"__init__(self): super(NetRelu, self).__init__() self.relu = P.ReLU() def construct(self, x): return self.relu(x) class NetReluDynamic(nn.Cell):",
"-1]]]]).astype(np.int64)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE,",
"@pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1],",
"or implied. # See the License for the specific language governing permissions and",
"output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64_dynamic_shape(): x",
"1], [10, 1, -1]]]]).astype(np.int64)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10,",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"@pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1],",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"in writing, software # distributed under the License is distributed on an \"AS",
"= relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int32(): x =",
"1, -1]]]]).astype(np.int64)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64)",
"# See the License for the specific language governing permissions and # limitations",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu =",
"def construct(self, x): x_conv = self.conv(x) return self.relu(x_conv) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_float32():",
"self.relu = P.ReLU() def construct(self, x): return self.relu(x) class NetReluDynamic(nn.Cell): def __init__(self): super(NetReluDynamic,",
"expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")",
"@pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_float32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10,",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int32)) expect =",
"expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1,",
"@pytest.mark.env_onecard def test_relu_int64(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1,",
"you may not use this file except in compliance with the License. #",
"NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64_dynamic_shape():",
"0, 1,], [10, 1, 0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x)",
"Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License,",
"1, 0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() ==",
"x): return self.relu(x) class NetReluDynamic(nn.Cell): def __init__(self): super(NetReluDynamic, self).__init__() self.conv = inner.GpuConvertToDynamicShape() self.relu",
"def construct(self, x): return self.relu(x) class NetReluDynamic(nn.Cell): def __init__(self): super(NetReluDynamic, self).__init__() self.conv =",
"use this file except in compliance with the License. # You may obtain",
"== expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int32(): x = Tensor(np.array([[[[-1, 1, 10], [1,",
"x): x_conv = self.conv(x) return self.relu(x_conv) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_float32(): x =",
"limitations under the License. # ============================================================================ import numpy as np import pytest import",
"print(output.asnumpy(), expect) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu() output =",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"construct(self, x): return self.relu(x) class NetReluDynamic(nn.Cell): def __init__(self): super(NetReluDynamic, self).__init__() self.conv = inner.GpuConvertToDynamicShape()",
"Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.float32)) expect = np.array([[[[0, 1,",
"device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training",
"[1, -1, 1], [10, 1, -1]]]]).astype(np.int32)) expect = np.array([[[[0, 1, 10,], [1, 0,",
"[1, 0, 1,], [10, 1, 0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output =",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"output = relu(x) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu() output",
"1,], [10, 1, 0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert",
"test_relu_int64_dynamic_shape(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int64)) expect",
"for the specific language governing permissions and # limitations under the License. #",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu_dynamic = NetReluDynamic() output = relu_dynamic(x) assert (output.asnumpy() == expect).all()",
"numpy as np import pytest import mindspore.context as context import mindspore.nn as nn",
"@pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10,",
"= NetRelu() output = relu(x) print(output.asnumpy(), expect) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")",
"= Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int8)) expect = np.array([[[[0,",
"np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu =",
"= relu(x) print(output.asnumpy(), expect) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu()",
"# # Unless required by applicable law or agreed to in writing, software",
"import Tensor from mindspore.ops import operations as P from mindspore.ops.operations import _inner_ops as",
"express or implied. # See the License for the specific language governing permissions",
"Tensor from mindspore.ops import operations as P from mindspore.ops.operations import _inner_ops as inner",
"expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64_dynamic_shape(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1,",
"permissions and # limitations under the License. # ============================================================================ import numpy as np",
"either express or implied. # See the License for the specific language governing",
"== expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() ==",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE,",
"= np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu",
"= NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu =",
"relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64(): x = Tensor(np.array([[[[-1,",
"-1]]]]).astype(np.float32)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE,",
"= Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int64)) expect = np.array([[[[0,",
"-1, 1], [10, 1, -1]]]]).astype(np.float32)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,],",
"_inner_ops as inner class NetRelu(nn.Cell): def __init__(self): super(NetRelu, self).__init__() self.relu = P.ReLU() def",
"= relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int8(): x =",
"the License. # You may obtain a copy of the License at #",
"mindspore.ops.operations import _inner_ops as inner class NetRelu(nn.Cell): def __init__(self): super(NetRelu, self).__init__() self.relu =",
"Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int8)) expect = np.array([[[[0, 1,",
"-1, 1], [10, 1, -1]]]]).astype(np.int8)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,],",
"NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int32():",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"1], [10, 1, -1]]]]).astype(np.int8)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10,",
"1,], [10, 1, 0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert",
"[10, 1, -1]]]]).astype(np.int64)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1,",
"NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int8():",
"10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output",
"[10, 1, -1]]]]).astype(np.float32)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1,",
"= inner.GpuConvertToDynamicShape() self.relu = P.ReLU() def construct(self, x): x_conv = self.conv(x) return self.relu(x_conv)",
"relu(x) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x)",
"@pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64_dynamic_shape(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10,",
"@pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_float32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1],",
"Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the",
"relu = NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu",
"@pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int8(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10,",
"============================================================================ import numpy as np import pytest import mindspore.context as context import mindspore.nn",
"= Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.float32)) expect = np.array([[[[0,",
"with the License. # You may obtain a copy of the License at",
"test_relu_int64(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int64)) expect",
"context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0",
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"-1]]]]).astype(np.int64)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE,",
"[10, 1, -1]]]]).astype(np.int8)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1,",
"@pytest.mark.env_onecard def test_relu_int64_dynamic_shape(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1,",
"return self.relu(x) class NetReluDynamic(nn.Cell): def __init__(self): super(NetReluDynamic, self).__init__() self.conv = inner.GpuConvertToDynamicShape() self.relu =",
"assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int32(): x = Tensor(np.array([[[[-1, 1,",
"= Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int32)) expect = np.array([[[[0,",
"law or agreed to in writing, software # distributed under the License is",
"the License for the specific language governing permissions and # limitations under the",
"under the License. # ============================================================================ import numpy as np import pytest import mindspore.context",
"0, 1,], [10, 1, 0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x)",
"10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu_dynamic = NetReluDynamic() output",
"License. # ============================================================================ import numpy as np import pytest import mindspore.context as context",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"(output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64_dynamic_shape(): x = Tensor(np.array([[[[-1, 1, 10],",
"expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() == expect).all()",
"assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64(): x = Tensor(np.array([[[[-1, 1,",
"NetReluDynamic(nn.Cell): def __init__(self): super(NetReluDynamic, self).__init__() self.conv = inner.GpuConvertToDynamicShape() self.relu = P.ReLU() def construct(self,",
"context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) print(output.asnumpy(), expect) assert (output.asnumpy() ==",
"P.ReLU() def construct(self, x): x_conv = self.conv(x) return self.relu(x_conv) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def",
"in compliance with the License. # You may obtain a copy of the",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert",
"See the License for the specific language governing permissions and # limitations under",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"class NetReluDynamic(nn.Cell): def __init__(self): super(NetReluDynamic, self).__init__() self.conv = inner.GpuConvertToDynamicShape() self.relu = P.ReLU() def",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"# limitations under the License. # ============================================================================ import numpy as np import pytest",
"super(NetRelu, self).__init__() self.relu = P.ReLU() def construct(self, x): return self.relu(x) class NetReluDynamic(nn.Cell): def",
"1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int8)) expect = np.array([[[[0, 1, 10,],",
"expect) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x)",
"10], [1, -1, 1], [10, 1, -1]]]]).astype(np.float32)) expect = np.array([[[[0, 1, 10,], [1,",
"0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu_dynamic = NetReluDynamic() output = relu_dynamic(x)",
"0.]]]]).astype(np.float32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy() == expect).all()",
"test_relu_int8(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int8)) expect",
"import pytest import mindspore.context as context import mindspore.nn as nn from mindspore import",
"1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) print(output.asnumpy(),",
"[10, 1, 0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu() output = relu(x) assert (output.asnumpy()",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"except in compliance with the License. # You may obtain a copy of",
"self.conv(x) return self.relu(x_conv) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_float32(): x = Tensor(np.array([[[[-1, 1, 10],",
"1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.float32)) expect = np.array([[[[0, 1, 10,],",
"# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"[1, -1, 1], [10, 1, -1]]]]).astype(np.int8)) expect = np.array([[[[0, 1, 10,], [1, 0,",
"class NetRelu(nn.Cell): def __init__(self): super(NetRelu, self).__init__() self.relu = P.ReLU() def construct(self, x): return",
"super(NetReluDynamic, self).__init__() self.conv = inner.GpuConvertToDynamicShape() self.relu = P.ReLU() def construct(self, x): x_conv =",
"(output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64(): x = Tensor(np.array([[[[-1, 1, 10],",
"self).__init__() self.relu = P.ReLU() def construct(self, x): return self.relu(x) class NetReluDynamic(nn.Cell): def __init__(self):",
"= relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64(): x =",
"[1, -1, 1], [10, 1, -1]]]]).astype(np.float32)) expect = np.array([[[[0, 1, 10,], [1, 0,",
"(output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int32(): x = Tensor(np.array([[[[-1, 1, 10],",
"= relu(x) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu = NetRelu() output =",
"-1, 1], [10, 1, -1]]]]).astype(np.int64)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,],",
"mindspore.ops import operations as P from mindspore.ops.operations import _inner_ops as inner class NetRelu(nn.Cell):",
"the specific language governing permissions and # limitations under the License. # ============================================================================",
"expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")",
"Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int64)) expect = np.array([[[[0, 1,",
"= np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu",
"x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int8)) expect =",
"output = relu(x) assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int32(): x",
"as context import mindspore.nn as nn from mindspore import Tensor from mindspore.ops import",
"as nn from mindspore import Tensor from mindspore.ops import operations as P from",
"1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.int64)) expect = np.array([[[[0, 1, 10,],",
"Ltd # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int64) context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu_dynamic =",
"specific language governing permissions and # limitations under the License. # ============================================================================ import",
"output = relu(x) print(output.asnumpy(), expect) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu =",
"import operations as P from mindspore.ops.operations import _inner_ops as inner class NetRelu(nn.Cell): def",
"__init__(self): super(NetReluDynamic, self).__init__() self.conv = inner.GpuConvertToDynamicShape() self.relu = P.ReLU() def construct(self, x): x_conv",
"@pytest.mark.env_onecard def test_relu_float32(): x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1,",
"NetRelu() output = relu(x) print(output.asnumpy(), expect) assert (output.asnumpy() == expect).all() context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\") relu",
"== expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_relu_int64_dynamic_shape(): x = Tensor(np.array([[[[-1, 1, 10], [1,",
"as np import pytest import mindspore.context as context import mindspore.nn as nn from",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"-1]]]]).astype(np.int8)) expect = np.array([[[[0, 1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int8) context.set_context(mode=context.PYNATIVE_MODE,",
"1, 10,], [1, 0, 1,], [10, 1, 0.]]]]).astype(np.int32) context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\") relu = NetRelu()",
"<reponame>GuoSuiming/mindspore<filename>tests/st/ops/gpu/test_relu_op.py # Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the"
] |
[
"from zah.urls import render, render_page from zah.core.servers import BaseServer, DevelopmentServer from zah.shortcuts import",
"app.use_component(Store) # def view1(request, **kwargs): # return render(request, 'home.html') # @app.as_route('/test2', 'test2') #",
"# app.use_component(Store) # def view1(request, **kwargs): # return render(request, 'home.html') # @app.as_route('/test2', 'test2')",
"render, render_page from zah.core.servers import BaseServer, DevelopmentServer from zah.shortcuts import get_default_server app =",
"DevelopmentServer from zah.shortcuts import get_default_server app = BaseServer() # app = get_default_server() #",
"import Router # from zah.store import Store from zah.urls import render, render_page from",
"BaseServer() # app = get_default_server() # app.use_component(Router) # app.use_component(Store) # def view1(request, **kwargs):",
"= get_default_server() # app.use_component(Router) # app.use_component(Store) # def view1(request, **kwargs): # return render(request,",
"Router # from zah.store import Store from zah.urls import render, render_page from zah.core.servers",
"import BaseServer, DevelopmentServer from zah.shortcuts import get_default_server app = BaseServer() # app =",
"import render, render_page from zah.core.servers import BaseServer, DevelopmentServer from zah.shortcuts import get_default_server app",
"render_page from zah.core.servers import BaseServer, DevelopmentServer from zah.shortcuts import get_default_server app = BaseServer()",
"BaseServer, DevelopmentServer from zah.shortcuts import get_default_server app = BaseServer() # app = get_default_server()",
"zah.router.app import Router # from zah.store import Store from zah.urls import render, render_page",
"render(request, 'home.html') # @app.as_route('/test2', 'test2') # def view2(request, **kwargs): # return render(request, 'home.html')",
"# from zah.store import Store from zah.urls import render, render_page from zah.core.servers import",
"app.use_component(Router) # app.use_component(Store) # def view1(request, **kwargs): # return render(request, 'home.html') # @app.as_route('/test2',",
"@app.as_route('/test2', 'test2') # def view2(request, **kwargs): # return render(request, 'home.html') # app.add_route('/test', view1,",
"app = get_default_server() # app.use_component(Router) # app.use_component(Store) # def view1(request, **kwargs): # return",
"view1(request, **kwargs): # return render(request, 'home.html') # @app.as_route('/test2', 'test2') # def view2(request, **kwargs):",
"get_default_server() # app.use_component(Router) # app.use_component(Store) # def view1(request, **kwargs): # return render(request, 'home.html')",
"# return render(request, 'home.html') # @app.as_route('/test2', 'test2') # def view2(request, **kwargs): # return",
"zah.shortcuts import get_default_server app = BaseServer() # app = get_default_server() # app.use_component(Router) #",
"= BaseServer() # app = get_default_server() # app.use_component(Router) # app.use_component(Store) # def view1(request,",
"# def view2(request, **kwargs): # return render(request, 'home.html') # app.add_route('/test', view1, 'test1') #",
"import get_default_server app = BaseServer() # app = get_default_server() # app.use_component(Router) # app.use_component(Store)",
"get_default_server app = BaseServer() # app = get_default_server() # app.use_component(Router) # app.use_component(Store) #",
"view2(request, **kwargs): # return render(request, 'home.html') # app.add_route('/test', view1, 'test1') # app.add_route('/test3', render_page('home.html'))",
"from zah.router.app import Router # from zah.store import Store from zah.urls import render,",
"zah.urls import render, render_page from zah.core.servers import BaseServer, DevelopmentServer from zah.shortcuts import get_default_server",
"from zah.shortcuts import get_default_server app = BaseServer() # app = get_default_server() # app.use_component(Router)",
"Store from zah.urls import render, render_page from zah.core.servers import BaseServer, DevelopmentServer from zah.shortcuts",
"app = BaseServer() # app = get_default_server() # app.use_component(Router) # app.use_component(Store) # def",
"from zah.store import Store from zah.urls import render, render_page from zah.core.servers import BaseServer,",
"from zah.core.servers import BaseServer, DevelopmentServer from zah.shortcuts import get_default_server app = BaseServer() #",
"# app.use_component(Router) # app.use_component(Store) # def view1(request, **kwargs): # return render(request, 'home.html') #",
"**kwargs): # return render(request, 'home.html') # @app.as_route('/test2', 'test2') # def view2(request, **kwargs): #",
"zah.core.servers import BaseServer, DevelopmentServer from zah.shortcuts import get_default_server app = BaseServer() # app",
"'home.html') # @app.as_route('/test2', 'test2') # def view2(request, **kwargs): # return render(request, 'home.html') #",
"def view2(request, **kwargs): # return render(request, 'home.html') # app.add_route('/test', view1, 'test1') # app.add_route('/test3',",
"import Store from zah.urls import render, render_page from zah.core.servers import BaseServer, DevelopmentServer from",
"# def view1(request, **kwargs): # return render(request, 'home.html') # @app.as_route('/test2', 'test2') # def",
"return render(request, 'home.html') # @app.as_route('/test2', 'test2') # def view2(request, **kwargs): # return render(request,",
"zah.store import Store from zah.urls import render, render_page from zah.core.servers import BaseServer, DevelopmentServer",
"# app = get_default_server() # app.use_component(Router) # app.use_component(Store) # def view1(request, **kwargs): #",
"def view1(request, **kwargs): # return render(request, 'home.html') # @app.as_route('/test2', 'test2') # def view2(request,",
"# @app.as_route('/test2', 'test2') # def view2(request, **kwargs): # return render(request, 'home.html') # app.add_route('/test',",
"'test2') # def view2(request, **kwargs): # return render(request, 'home.html') # app.add_route('/test', view1, 'test1')"
] |
[
"= ap.parse_args() hpo_re = r\"HP\\:\\d+\" if args.delimit == \"csv\": delimit = \",\" else:",
"[a.replace(\"HP:\", \"\") for a in all_matches_hpo] all_matches_hpo = set(all_matches_hpo) row[7] = \";\".join(all_matches_hpo) sub_row",
"ap = argparse.ArgumentParser() ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in the file') ap.add_argument('--input',required=True,type=str,help='Input file') args = ap.parse_args()",
"hpo_re = r\"HP\\:\\d+\" if args.delimit == \"csv\": delimit = \",\" else: delimit =",
"delimit = \"\\t\" with open(args.input) as csvfile: csv_reader = csv.reader(csvfile, delimiter=delimit, quotechar='\"') header",
"import csv, re, argparse ap = argparse.ArgumentParser() ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in the file') ap.add_argument('--input',required=True,type=str,help='Input",
"all_matches_hpo = set(all_matches_hpo) row[7] = \";\".join(all_matches_hpo) sub_row = [row[0], row[1], row[2], row[3], row[7],",
"for a in all_matches_hpo] all_matches_hpo = set(all_matches_hpo) row[7] = \";\".join(all_matches_hpo) sub_row = [row[0],",
"= csv.reader(csvfile, delimiter=delimit, quotechar='\"') header = next(csv_reader) sub_header = [\"hgnc_gene_name\", \"omim_gene_id\", \"disease_name\", \"omim_disease_id\",",
"\"hgnc_id\"] print('\\t'.join(sub_header)) for row in csv_reader: if (row[4] == \"possible\"): pass else: all_matches_hpo",
"\"possible\"): pass else: all_matches_hpo = list() all_matches_hpo.extend(re.findall(hpo_re, row[7])) #Unique HPO terms all_matches_hpo =",
"= \",\" else: delimit = \"\\t\" with open(args.input) as csvfile: csv_reader = csv.reader(csvfile,",
"#!/usr/bin/env python import csv, re, argparse ap = argparse.ArgumentParser() ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in the",
"\"hpo\", \"organ_specificity_list\", \"hgnc_id\"] print('\\t'.join(sub_header)) for row in csv_reader: if (row[4] == \"possible\"): pass",
"re, argparse ap = argparse.ArgumentParser() ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in the file') ap.add_argument('--input',required=True,type=str,help='Input file') args",
"== \"csv\": delimit = \",\" else: delimit = \"\\t\" with open(args.input) as csvfile:",
"csv, re, argparse ap = argparse.ArgumentParser() ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in the file') ap.add_argument('--input',required=True,type=str,help='Input file')",
"= [\"hgnc_gene_name\", \"omim_gene_id\", \"disease_name\", \"omim_disease_id\", \"hpo\", \"organ_specificity_list\", \"hgnc_id\"] print('\\t'.join(sub_header)) for row in csv_reader:",
"all_matches_hpo.extend(re.findall(hpo_re, row[7])) #Unique HPO terms all_matches_hpo = [a.replace(\"HP:\", \"\") for a in all_matches_hpo]",
"used in the file') ap.add_argument('--input',required=True,type=str,help='Input file') args = ap.parse_args() hpo_re = r\"HP\\:\\d+\" if",
"= next(csv_reader) sub_header = [\"hgnc_gene_name\", \"omim_gene_id\", \"disease_name\", \"omim_disease_id\", \"hpo\", \"organ_specificity_list\", \"hgnc_id\"] print('\\t'.join(sub_header)) for",
"all_matches_hpo = [a.replace(\"HP:\", \"\") for a in all_matches_hpo] all_matches_hpo = set(all_matches_hpo) row[7] =",
"\",\" else: delimit = \"\\t\" with open(args.input) as csvfile: csv_reader = csv.reader(csvfile, delimiter=delimit,",
"csv_reader = csv.reader(csvfile, delimiter=delimit, quotechar='\"') header = next(csv_reader) sub_header = [\"hgnc_gene_name\", \"omim_gene_id\", \"disease_name\",",
"args = ap.parse_args() hpo_re = r\"HP\\:\\d+\" if args.delimit == \"csv\": delimit = \",\"",
"= \"\\t\" with open(args.input) as csvfile: csv_reader = csv.reader(csvfile, delimiter=delimit, quotechar='\"') header =",
"else: all_matches_hpo = list() all_matches_hpo.extend(re.findall(hpo_re, row[7])) #Unique HPO terms all_matches_hpo = [a.replace(\"HP:\", \"\")",
"csvfile: csv_reader = csv.reader(csvfile, delimiter=delimit, quotechar='\"') header = next(csv_reader) sub_header = [\"hgnc_gene_name\", \"omim_gene_id\",",
"HPO terms all_matches_hpo = [a.replace(\"HP:\", \"\") for a in all_matches_hpo] all_matches_hpo = set(all_matches_hpo)",
"delimiter=delimit, quotechar='\"') header = next(csv_reader) sub_header = [\"hgnc_gene_name\", \"omim_gene_id\", \"disease_name\", \"omim_disease_id\", \"hpo\", \"organ_specificity_list\",",
"argparse.ArgumentParser() ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in the file') ap.add_argument('--input',required=True,type=str,help='Input file') args = ap.parse_args() hpo_re =",
"\"organ_specificity_list\", \"hgnc_id\"] print('\\t'.join(sub_header)) for row in csv_reader: if (row[4] == \"possible\"): pass else:",
"= [a.replace(\"HP:\", \"\") for a in all_matches_hpo] all_matches_hpo = set(all_matches_hpo) row[7] = \";\".join(all_matches_hpo)",
"file') ap.add_argument('--input',required=True,type=str,help='Input file') args = ap.parse_args() hpo_re = r\"HP\\:\\d+\" if args.delimit == \"csv\":",
"= list() all_matches_hpo.extend(re.findall(hpo_re, row[7])) #Unique HPO terms all_matches_hpo = [a.replace(\"HP:\", \"\") for a",
"with open(args.input) as csvfile: csv_reader = csv.reader(csvfile, delimiter=delimit, quotechar='\"') header = next(csv_reader) sub_header",
"= r\"HP\\:\\d+\" if args.delimit == \"csv\": delimit = \",\" else: delimit = \"\\t\"",
"terms all_matches_hpo = [a.replace(\"HP:\", \"\") for a in all_matches_hpo] all_matches_hpo = set(all_matches_hpo) row[7]",
"all_matches_hpo = list() all_matches_hpo.extend(re.findall(hpo_re, row[7])) #Unique HPO terms all_matches_hpo = [a.replace(\"HP:\", \"\") for",
"else: delimit = \"\\t\" with open(args.input) as csvfile: csv_reader = csv.reader(csvfile, delimiter=delimit, quotechar='\"')",
"\"csv\": delimit = \",\" else: delimit = \"\\t\" with open(args.input) as csvfile: csv_reader",
"\"omim_disease_id\", \"hpo\", \"organ_specificity_list\", \"hgnc_id\"] print('\\t'.join(sub_header)) for row in csv_reader: if (row[4] == \"possible\"):",
"= argparse.ArgumentParser() ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in the file') ap.add_argument('--input',required=True,type=str,help='Input file') args = ap.parse_args() hpo_re",
"<filename>decipher/parse_decipher.py #!/usr/bin/env python import csv, re, argparse ap = argparse.ArgumentParser() ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in",
"set(all_matches_hpo) row[7] = \";\".join(all_matches_hpo) sub_row = [row[0], row[1], row[2], row[3], row[7], row[8], row[12]]",
"\"omim_gene_id\", \"disease_name\", \"omim_disease_id\", \"hpo\", \"organ_specificity_list\", \"hgnc_id\"] print('\\t'.join(sub_header)) for row in csv_reader: if (row[4]",
"next(csv_reader) sub_header = [\"hgnc_gene_name\", \"omim_gene_id\", \"disease_name\", \"omim_disease_id\", \"hpo\", \"organ_specificity_list\", \"hgnc_id\"] print('\\t'.join(sub_header)) for row",
"row[7])) #Unique HPO terms all_matches_hpo = [a.replace(\"HP:\", \"\") for a in all_matches_hpo] all_matches_hpo",
"in csv_reader: if (row[4] == \"possible\"): pass else: all_matches_hpo = list() all_matches_hpo.extend(re.findall(hpo_re, row[7]))",
"== \"possible\"): pass else: all_matches_hpo = list() all_matches_hpo.extend(re.findall(hpo_re, row[7])) #Unique HPO terms all_matches_hpo",
"\"\") for a in all_matches_hpo] all_matches_hpo = set(all_matches_hpo) row[7] = \";\".join(all_matches_hpo) sub_row =",
"args.delimit == \"csv\": delimit = \",\" else: delimit = \"\\t\" with open(args.input) as",
"sub_header = [\"hgnc_gene_name\", \"omim_gene_id\", \"disease_name\", \"omim_disease_id\", \"hpo\", \"organ_specificity_list\", \"hgnc_id\"] print('\\t'.join(sub_header)) for row in",
"(row[4] == \"possible\"): pass else: all_matches_hpo = list() all_matches_hpo.extend(re.findall(hpo_re, row[7])) #Unique HPO terms",
"as csvfile: csv_reader = csv.reader(csvfile, delimiter=delimit, quotechar='\"') header = next(csv_reader) sub_header = [\"hgnc_gene_name\",",
"[\"hgnc_gene_name\", \"omim_gene_id\", \"disease_name\", \"omim_disease_id\", \"hpo\", \"organ_specificity_list\", \"hgnc_id\"] print('\\t'.join(sub_header)) for row in csv_reader: if",
"python import csv, re, argparse ap = argparse.ArgumentParser() ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in the file')",
"list() all_matches_hpo.extend(re.findall(hpo_re, row[7])) #Unique HPO terms all_matches_hpo = [a.replace(\"HP:\", \"\") for a in",
"in the file') ap.add_argument('--input',required=True,type=str,help='Input file') args = ap.parse_args() hpo_re = r\"HP\\:\\d+\" if args.delimit",
"if args.delimit == \"csv\": delimit = \",\" else: delimit = \"\\t\" with open(args.input)",
"in all_matches_hpo] all_matches_hpo = set(all_matches_hpo) row[7] = \";\".join(all_matches_hpo) sub_row = [row[0], row[1], row[2],",
"open(args.input) as csvfile: csv_reader = csv.reader(csvfile, delimiter=delimit, quotechar='\"') header = next(csv_reader) sub_header =",
"\"disease_name\", \"omim_disease_id\", \"hpo\", \"organ_specificity_list\", \"hgnc_id\"] print('\\t'.join(sub_header)) for row in csv_reader: if (row[4] ==",
"for row in csv_reader: if (row[4] == \"possible\"): pass else: all_matches_hpo = list()",
"csv.reader(csvfile, delimiter=delimit, quotechar='\"') header = next(csv_reader) sub_header = [\"hgnc_gene_name\", \"omim_gene_id\", \"disease_name\", \"omim_disease_id\", \"hpo\",",
"all_matches_hpo] all_matches_hpo = set(all_matches_hpo) row[7] = \";\".join(all_matches_hpo) sub_row = [row[0], row[1], row[2], row[3],",
"ap.parse_args() hpo_re = r\"HP\\:\\d+\" if args.delimit == \"csv\": delimit = \",\" else: delimit",
"print('\\t'.join(sub_header)) for row in csv_reader: if (row[4] == \"possible\"): pass else: all_matches_hpo =",
"pass else: all_matches_hpo = list() all_matches_hpo.extend(re.findall(hpo_re, row[7])) #Unique HPO terms all_matches_hpo = [a.replace(\"HP:\",",
"file') args = ap.parse_args() hpo_re = r\"HP\\:\\d+\" if args.delimit == \"csv\": delimit =",
"= set(all_matches_hpo) row[7] = \";\".join(all_matches_hpo) sub_row = [row[0], row[1], row[2], row[3], row[7], row[8],",
"csv_reader: if (row[4] == \"possible\"): pass else: all_matches_hpo = list() all_matches_hpo.extend(re.findall(hpo_re, row[7])) #Unique",
"quotechar='\"') header = next(csv_reader) sub_header = [\"hgnc_gene_name\", \"omim_gene_id\", \"disease_name\", \"omim_disease_id\", \"hpo\", \"organ_specificity_list\", \"hgnc_id\"]",
"r\"HP\\:\\d+\" if args.delimit == \"csv\": delimit = \",\" else: delimit = \"\\t\" with",
"a in all_matches_hpo] all_matches_hpo = set(all_matches_hpo) row[7] = \";\".join(all_matches_hpo) sub_row = [row[0], row[1],",
"argparse ap = argparse.ArgumentParser() ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in the file') ap.add_argument('--input',required=True,type=str,help='Input file') args =",
"delimit = \",\" else: delimit = \"\\t\" with open(args.input) as csvfile: csv_reader =",
"ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in the file') ap.add_argument('--input',required=True,type=str,help='Input file') args = ap.parse_args() hpo_re = r\"HP\\:\\d+\"",
"row in csv_reader: if (row[4] == \"possible\"): pass else: all_matches_hpo = list() all_matches_hpo.extend(re.findall(hpo_re,",
"ap.add_argument('--input',required=True,type=str,help='Input file') args = ap.parse_args() hpo_re = r\"HP\\:\\d+\" if args.delimit == \"csv\": delimit",
"the file') ap.add_argument('--input',required=True,type=str,help='Input file') args = ap.parse_args() hpo_re = r\"HP\\:\\d+\" if args.delimit ==",
"if (row[4] == \"possible\"): pass else: all_matches_hpo = list() all_matches_hpo.extend(re.findall(hpo_re, row[7])) #Unique HPO",
"header = next(csv_reader) sub_header = [\"hgnc_gene_name\", \"omim_gene_id\", \"disease_name\", \"omim_disease_id\", \"hpo\", \"organ_specificity_list\", \"hgnc_id\"] print('\\t'.join(sub_header))",
"row[7] = \";\".join(all_matches_hpo) sub_row = [row[0], row[1], row[2], row[3], row[7], row[8], row[12]] print('\\t'.join(sub_row))",
"\"\\t\" with open(args.input) as csvfile: csv_reader = csv.reader(csvfile, delimiter=delimit, quotechar='\"') header = next(csv_reader)",
"#Unique HPO terms all_matches_hpo = [a.replace(\"HP:\", \"\") for a in all_matches_hpo] all_matches_hpo ="
] |
[
"if not os.path.exists(\"./log/\"): os.mkdir(\"./log/\") handler = logging.handlers.TimedRotatingFileHandler(\"./log/{}.log\".format(name), \"midnight\", 1, 7) handler.setFormatter(formater) log.addHandler(handler) return",
"handler = logging.handlers.TimedRotatingFileHandler(\"./log/{}.log\".format(name), \"midnight\", 1, 7) handler.setFormatter(formater) log.addHandler(handler) return log log = get_log()",
"log: return log log = logging.getLogger('log') log.setLevel(logging.DEBUG) # logG.setLevel(logging.INFO) fmt = \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater",
"<reponame>pkufergus/pingeci # -*- coding: utf-8 -*- import os import logging import logging.handlers import",
"import logging import logging.handlers import sys reload(sys) sys.setdefaultencoding('utf-8') log = None def get_log(name=\"main\"):",
"logG.setLevel(logging.INFO) fmt = \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater = logging.Formatter(fmt) if not os.path.exists(\"./log/\"): os.mkdir(\"./log/\") handler =",
"import os import logging import logging.handlers import sys reload(sys) sys.setdefaultencoding('utf-8') log = None",
"def get_log(name=\"main\"): global log if log: return log log = logging.getLogger('log') log.setLevel(logging.DEBUG) #",
"\"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater = logging.Formatter(fmt) if not os.path.exists(\"./log/\"): os.mkdir(\"./log/\") handler = logging.handlers.TimedRotatingFileHandler(\"./log/{}.log\".format(name), \"midnight\", 1,",
"reload(sys) sys.setdefaultencoding('utf-8') log = None def get_log(name=\"main\"): global log if log: return log",
"log = None def get_log(name=\"main\"): global log if log: return log log =",
"sys.setdefaultencoding('utf-8') log = None def get_log(name=\"main\"): global log if log: return log log",
"os import logging import logging.handlers import sys reload(sys) sys.setdefaultencoding('utf-8') log = None def",
"logging import logging.handlers import sys reload(sys) sys.setdefaultencoding('utf-8') log = None def get_log(name=\"main\"): global",
"= \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater = logging.Formatter(fmt) if not os.path.exists(\"./log/\"): os.mkdir(\"./log/\") handler = logging.handlers.TimedRotatingFileHandler(\"./log/{}.log\".format(name), \"midnight\",",
"coding: utf-8 -*- import os import logging import logging.handlers import sys reload(sys) sys.setdefaultencoding('utf-8')",
"import sys reload(sys) sys.setdefaultencoding('utf-8') log = None def get_log(name=\"main\"): global log if log:",
"= logging.getLogger('log') log.setLevel(logging.DEBUG) # logG.setLevel(logging.INFO) fmt = \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater = logging.Formatter(fmt) if not",
"logging.handlers import sys reload(sys) sys.setdefaultencoding('utf-8') log = None def get_log(name=\"main\"): global log if",
"# -*- coding: utf-8 -*- import os import logging import logging.handlers import sys",
"not os.path.exists(\"./log/\"): os.mkdir(\"./log/\") handler = logging.handlers.TimedRotatingFileHandler(\"./log/{}.log\".format(name), \"midnight\", 1, 7) handler.setFormatter(formater) log.addHandler(handler) return log",
"logging.Formatter(fmt) if not os.path.exists(\"./log/\"): os.mkdir(\"./log/\") handler = logging.handlers.TimedRotatingFileHandler(\"./log/{}.log\".format(name), \"midnight\", 1, 7) handler.setFormatter(formater) log.addHandler(handler)",
"import logging.handlers import sys reload(sys) sys.setdefaultencoding('utf-8') log = None def get_log(name=\"main\"): global log",
"get_log(name=\"main\"): global log if log: return log log = logging.getLogger('log') log.setLevel(logging.DEBUG) # logG.setLevel(logging.INFO)",
"-*- coding: utf-8 -*- import os import logging import logging.handlers import sys reload(sys)",
"= None def get_log(name=\"main\"): global log if log: return log log = logging.getLogger('log')",
"log log = logging.getLogger('log') log.setLevel(logging.DEBUG) # logG.setLevel(logging.INFO) fmt = \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater = logging.Formatter(fmt)",
"= logging.Formatter(fmt) if not os.path.exists(\"./log/\"): os.mkdir(\"./log/\") handler = logging.handlers.TimedRotatingFileHandler(\"./log/{}.log\".format(name), \"midnight\", 1, 7) handler.setFormatter(formater)",
"return log log = logging.getLogger('log') log.setLevel(logging.DEBUG) # logG.setLevel(logging.INFO) fmt = \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater =",
"fmt = \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater = logging.Formatter(fmt) if not os.path.exists(\"./log/\"): os.mkdir(\"./log/\") handler = logging.handlers.TimedRotatingFileHandler(\"./log/{}.log\".format(name),",
"global log if log: return log log = logging.getLogger('log') log.setLevel(logging.DEBUG) # logG.setLevel(logging.INFO) fmt",
"log = logging.getLogger('log') log.setLevel(logging.DEBUG) # logG.setLevel(logging.INFO) fmt = \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater = logging.Formatter(fmt) if",
"utf-8 -*- import os import logging import logging.handlers import sys reload(sys) sys.setdefaultencoding('utf-8') log",
"log.setLevel(logging.DEBUG) # logG.setLevel(logging.INFO) fmt = \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater = logging.Formatter(fmt) if not os.path.exists(\"./log/\"): os.mkdir(\"./log/\")",
"os.mkdir(\"./log/\") handler = logging.handlers.TimedRotatingFileHandler(\"./log/{}.log\".format(name), \"midnight\", 1, 7) handler.setFormatter(formater) log.addHandler(handler) return log log =",
"formater = logging.Formatter(fmt) if not os.path.exists(\"./log/\"): os.mkdir(\"./log/\") handler = logging.handlers.TimedRotatingFileHandler(\"./log/{}.log\".format(name), \"midnight\", 1, 7)",
"None def get_log(name=\"main\"): global log if log: return log log = logging.getLogger('log') log.setLevel(logging.DEBUG)",
"os.path.exists(\"./log/\"): os.mkdir(\"./log/\") handler = logging.handlers.TimedRotatingFileHandler(\"./log/{}.log\".format(name), \"midnight\", 1, 7) handler.setFormatter(formater) log.addHandler(handler) return log log",
"sys reload(sys) sys.setdefaultencoding('utf-8') log = None def get_log(name=\"main\"): global log if log: return",
"logging.getLogger('log') log.setLevel(logging.DEBUG) # logG.setLevel(logging.INFO) fmt = \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater = logging.Formatter(fmt) if not os.path.exists(\"./log/\"):",
"if log: return log log = logging.getLogger('log') log.setLevel(logging.DEBUG) # logG.setLevel(logging.INFO) fmt = \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\"",
"log if log: return log log = logging.getLogger('log') log.setLevel(logging.DEBUG) # logG.setLevel(logging.INFO) fmt =",
"# logG.setLevel(logging.INFO) fmt = \"%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s\" formater = logging.Formatter(fmt) if not os.path.exists(\"./log/\"): os.mkdir(\"./log/\") handler",
"-*- import os import logging import logging.handlers import sys reload(sys) sys.setdefaultencoding('utf-8') log ="
] |
[] |
[
"img * 0.5 + seg * 0.5 mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) if __name__ ==",
"def parse_args(): parser = argparse.ArgumentParser( description='Process a checkpoint to be published') parser.add_argument('img_dir', help='img",
"mmcv.imread(osp.join(args.gt_dir, seg_file)) # binded = img * 0.5 + seg * 0.5 #",
"img_file.replace(img_suffix, seg_map_suffix) # img = mmcv.imread(osp.join(args.img_dir, img_file)) # seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) #",
"os.path as osp import mmcv def parse_args(): parser = argparse.ArgumentParser( description='Process a checkpoint",
"directory') args = parser.parse_args() return args # def main(): # args = parse_args()",
"for img_file in mmcv.scandir(args.img_dir, suffix=img_suffix): # seg_file = img_file.replace(img_suffix, seg_map_suffix) # img =",
"seg_file)) # binded = img * 0.5 + seg * 0.5 # mmcv.imwrite(binded,",
"* 0.5 # mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) def main(): args = parse_args() img_suffix =",
"seg_file = img_file.replace(img_suffix, seg_map_suffix) if not osp.exists(osp.join(args.gt_dir, seg_file)): continue img = mmcv.imread(osp.join(args.img_dir, img_file))",
"help='output config directory') args = parser.parse_args() return args # def main(): # args",
"parse_args(): parser = argparse.ArgumentParser( description='Process a checkpoint to be published') parser.add_argument('img_dir', help='img config",
"help='gt config directory') parser.add_argument('out_dir', help='output config directory') args = parser.parse_args() return args #",
"checkpoint to be published') parser.add_argument('img_dir', help='img config directory') parser.add_argument('gt_dir', help='gt config directory') parser.add_argument('out_dir',",
"binded = img * 0.5 + seg * 0.5 # mmcv.imwrite(binded, osp.join(args.out_dir, img_file))",
"main(): # args = parse_args() # img_suffix = '_leftImg8bit.png' # seg_map_suffix = '_gtFine_color.png'",
"img_file in mmcv.scandir( args.img_dir, suffix=img_suffix, recursive=True): seg_file = img_file.replace(img_suffix, seg_map_suffix) if not osp.exists(osp.join(args.gt_dir,",
"in mmcv.scandir( args.img_dir, suffix=img_suffix, recursive=True): seg_file = img_file.replace(img_suffix, seg_map_suffix) if not osp.exists(osp.join(args.gt_dir, seg_file)):",
"# def main(): # args = parse_args() # img_suffix = '_leftImg8bit.png' # seg_map_suffix",
"'.jpg' seg_map_suffix = '.png' mmcv.mkdir_or_exist(args.out_dir) for img_file in mmcv.scandir( args.img_dir, suffix=img_suffix, recursive=True): seg_file",
"argparse.ArgumentParser( description='Process a checkpoint to be published') parser.add_argument('img_dir', help='img config directory') parser.add_argument('gt_dir', help='gt",
"* 0.5 + seg * 0.5 mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) if __name__ == '__main__':",
"img = mmcv.imread(osp.join(args.img_dir, img_file)) seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) binded = img * 0.5",
"# seg_map_suffix = '_gtFine_color.png' # mmcv.mkdir_or_exist(args.out_dir) # for img_file in mmcv.scandir(args.img_dir, suffix=img_suffix): #",
"0.5 # mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) def main(): args = parse_args() img_suffix = '.jpg'",
"parser.add_argument('img_dir', help='img config directory') parser.add_argument('gt_dir', help='gt config directory') parser.add_argument('out_dir', help='output config directory') args",
"= '.png' mmcv.mkdir_or_exist(args.out_dir) for img_file in mmcv.scandir( args.img_dir, suffix=img_suffix, recursive=True): seg_file = img_file.replace(img_suffix,",
"continue img = mmcv.imread(osp.join(args.img_dir, img_file)) seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) binded = img *",
"config directory') parser.add_argument('out_dir', help='output config directory') args = parser.parse_args() return args # def",
"a checkpoint to be published') parser.add_argument('img_dir', help='img config directory') parser.add_argument('gt_dir', help='gt config directory')",
"seg_map_suffix = '.png' mmcv.mkdir_or_exist(args.out_dir) for img_file in mmcv.scandir( args.img_dir, suffix=img_suffix, recursive=True): seg_file =",
"args.img_dir, suffix=img_suffix, recursive=True): seg_file = img_file.replace(img_suffix, seg_map_suffix) if not osp.exists(osp.join(args.gt_dir, seg_file)): continue img",
"= mmcv.imread(osp.join(args.img_dir, img_file)) # seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) # binded = img *",
"directory') parser.add_argument('out_dir', help='output config directory') args = parser.parse_args() return args # def main():",
"mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) def main(): args = parse_args() img_suffix = '.jpg' seg_map_suffix =",
"# mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) def main(): args = parse_args() img_suffix = '.jpg' seg_map_suffix",
"if not osp.exists(osp.join(args.gt_dir, seg_file)): continue img = mmcv.imread(osp.join(args.img_dir, img_file)) seg = mmcv.imread(osp.join(args.gt_dir, seg_file))",
"args = parse_args() # img_suffix = '_leftImg8bit.png' # seg_map_suffix = '_gtFine_color.png' # mmcv.mkdir_or_exist(args.out_dir)",
"help='img config directory') parser.add_argument('gt_dir', help='gt config directory') parser.add_argument('out_dir', help='output config directory') args =",
"parse_args() # img_suffix = '_leftImg8bit.png' # seg_map_suffix = '_gtFine_color.png' # mmcv.mkdir_or_exist(args.out_dir) # for",
"return args # def main(): # args = parse_args() # img_suffix = '_leftImg8bit.png'",
"= '_leftImg8bit.png' # seg_map_suffix = '_gtFine_color.png' # mmcv.mkdir_or_exist(args.out_dir) # for img_file in mmcv.scandir(args.img_dir,",
"= img_file.replace(img_suffix, seg_map_suffix) if not osp.exists(osp.join(args.gt_dir, seg_file)): continue img = mmcv.imread(osp.join(args.img_dir, img_file)) seg",
"mmcv.scandir(args.img_dir, suffix=img_suffix): # seg_file = img_file.replace(img_suffix, seg_map_suffix) # img = mmcv.imread(osp.join(args.img_dir, img_file)) #",
"seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) # binded = img * 0.5 + seg *",
"parser.add_argument('out_dir', help='output config directory') args = parser.parse_args() return args # def main(): #",
"directory') parser.add_argument('gt_dir', help='gt config directory') parser.add_argument('out_dir', help='output config directory') args = parser.parse_args() return",
"recursive=True): seg_file = img_file.replace(img_suffix, seg_map_suffix) if not osp.exists(osp.join(args.gt_dir, seg_file)): continue img = mmcv.imread(osp.join(args.img_dir,",
"parser.parse_args() return args # def main(): # args = parse_args() # img_suffix =",
"img_file)) seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) binded = img * 0.5 + seg *",
"* 0.5 + seg * 0.5 # mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) def main(): args",
"= '.jpg' seg_map_suffix = '.png' mmcv.mkdir_or_exist(args.out_dir) for img_file in mmcv.scandir( args.img_dir, suffix=img_suffix, recursive=True):",
"img_suffix = '.jpg' seg_map_suffix = '.png' mmcv.mkdir_or_exist(args.out_dir) for img_file in mmcv.scandir( args.img_dir, suffix=img_suffix,",
"= parser.parse_args() return args # def main(): # args = parse_args() # img_suffix",
"binded = img * 0.5 + seg * 0.5 mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) if",
"# args = parse_args() # img_suffix = '_leftImg8bit.png' # seg_map_suffix = '_gtFine_color.png' #",
"in mmcv.scandir(args.img_dir, suffix=img_suffix): # seg_file = img_file.replace(img_suffix, seg_map_suffix) # img = mmcv.imread(osp.join(args.img_dir, img_file))",
"= argparse.ArgumentParser( description='Process a checkpoint to be published') parser.add_argument('img_dir', help='img config directory') parser.add_argument('gt_dir',",
"osp.exists(osp.join(args.gt_dir, seg_file)): continue img = mmcv.imread(osp.join(args.img_dir, img_file)) seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) binded =",
"0.5 + seg * 0.5 mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) if __name__ == '__main__': main()",
"img_file in mmcv.scandir(args.img_dir, suffix=img_suffix): # seg_file = img_file.replace(img_suffix, seg_map_suffix) # img = mmcv.imread(osp.join(args.img_dir,",
"seg * 0.5 # mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) def main(): args = parse_args() img_suffix",
"= '_gtFine_color.png' # mmcv.mkdir_or_exist(args.out_dir) # for img_file in mmcv.scandir(args.img_dir, suffix=img_suffix): # seg_file =",
"'.png' mmcv.mkdir_or_exist(args.out_dir) for img_file in mmcv.scandir( args.img_dir, suffix=img_suffix, recursive=True): seg_file = img_file.replace(img_suffix, seg_map_suffix)",
"= parse_args() img_suffix = '.jpg' seg_map_suffix = '.png' mmcv.mkdir_or_exist(args.out_dir) for img_file in mmcv.scandir(",
"seg_file)) binded = img * 0.5 + seg * 0.5 mmcv.imwrite(binded, osp.join(args.out_dir, img_file))",
"= img * 0.5 + seg * 0.5 # mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) def",
"mmcv.scandir( args.img_dir, suffix=img_suffix, recursive=True): seg_file = img_file.replace(img_suffix, seg_map_suffix) if not osp.exists(osp.join(args.gt_dir, seg_file)): continue",
"mmcv.mkdir_or_exist(args.out_dir) # for img_file in mmcv.scandir(args.img_dir, suffix=img_suffix): # seg_file = img_file.replace(img_suffix, seg_map_suffix) #",
"+ seg * 0.5 # mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) def main(): args = parse_args()",
"img_suffix = '_leftImg8bit.png' # seg_map_suffix = '_gtFine_color.png' # mmcv.mkdir_or_exist(args.out_dir) # for img_file in",
"# mmcv.mkdir_or_exist(args.out_dir) # for img_file in mmcv.scandir(args.img_dir, suffix=img_suffix): # seg_file = img_file.replace(img_suffix, seg_map_suffix)",
"= mmcv.imread(osp.join(args.gt_dir, seg_file)) binded = img * 0.5 + seg * 0.5 mmcv.imwrite(binded,",
"mmcv.mkdir_or_exist(args.out_dir) for img_file in mmcv.scandir( args.img_dir, suffix=img_suffix, recursive=True): seg_file = img_file.replace(img_suffix, seg_map_suffix) if",
"mmcv.imread(osp.join(args.gt_dir, seg_file)) binded = img * 0.5 + seg * 0.5 mmcv.imwrite(binded, osp.join(args.out_dir,",
"osp import mmcv def parse_args(): parser = argparse.ArgumentParser( description='Process a checkpoint to be",
"img_file)) # seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) # binded = img * 0.5 +",
"seg_map_suffix = '_gtFine_color.png' # mmcv.mkdir_or_exist(args.out_dir) # for img_file in mmcv.scandir(args.img_dir, suffix=img_suffix): # seg_file",
"suffix=img_suffix): # seg_file = img_file.replace(img_suffix, seg_map_suffix) # img = mmcv.imread(osp.join(args.img_dir, img_file)) # seg",
"description='Process a checkpoint to be published') parser.add_argument('img_dir', help='img config directory') parser.add_argument('gt_dir', help='gt config",
"args = parser.parse_args() return args # def main(): # args = parse_args() #",
"suffix=img_suffix, recursive=True): seg_file = img_file.replace(img_suffix, seg_map_suffix) if not osp.exists(osp.join(args.gt_dir, seg_file)): continue img =",
"img = mmcv.imread(osp.join(args.img_dir, img_file)) # seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) # binded = img",
"mmcv.imread(osp.join(args.img_dir, img_file)) # seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) # binded = img * 0.5",
"img_file)) def main(): args = parse_args() img_suffix = '.jpg' seg_map_suffix = '.png' mmcv.mkdir_or_exist(args.out_dir)",
"# binded = img * 0.5 + seg * 0.5 # mmcv.imwrite(binded, osp.join(args.out_dir,",
"osp.join(args.out_dir, img_file)) def main(): args = parse_args() img_suffix = '.jpg' seg_map_suffix = '.png'",
"mmcv def parse_args(): parser = argparse.ArgumentParser( description='Process a checkpoint to be published') parser.add_argument('img_dir',",
"main(): args = parse_args() img_suffix = '.jpg' seg_map_suffix = '.png' mmcv.mkdir_or_exist(args.out_dir) for img_file",
"seg_file = img_file.replace(img_suffix, seg_map_suffix) # img = mmcv.imread(osp.join(args.img_dir, img_file)) # seg = mmcv.imread(osp.join(args.gt_dir,",
"as osp import mmcv def parse_args(): parser = argparse.ArgumentParser( description='Process a checkpoint to",
"argparse import os.path as osp import mmcv def parse_args(): parser = argparse.ArgumentParser( description='Process",
"'_leftImg8bit.png' # seg_map_suffix = '_gtFine_color.png' # mmcv.mkdir_or_exist(args.out_dir) # for img_file in mmcv.scandir(args.img_dir, suffix=img_suffix):",
"parser.add_argument('gt_dir', help='gt config directory') parser.add_argument('out_dir', help='output config directory') args = parser.parse_args() return args",
"args # def main(): # args = parse_args() # img_suffix = '_leftImg8bit.png' #",
"# seg_file = img_file.replace(img_suffix, seg_map_suffix) # img = mmcv.imread(osp.join(args.img_dir, img_file)) # seg =",
"seg_map_suffix) # img = mmcv.imread(osp.join(args.img_dir, img_file)) # seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) # binded",
"0.5 + seg * 0.5 # mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) def main(): args =",
"= mmcv.imread(osp.join(args.gt_dir, seg_file)) # binded = img * 0.5 + seg * 0.5",
"seg_file)): continue img = mmcv.imread(osp.join(args.img_dir, img_file)) seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) binded = img",
"not osp.exists(osp.join(args.gt_dir, seg_file)): continue img = mmcv.imread(osp.join(args.img_dir, img_file)) seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) binded",
"be published') parser.add_argument('img_dir', help='img config directory') parser.add_argument('gt_dir', help='gt config directory') parser.add_argument('out_dir', help='output config",
"mmcv.imread(osp.join(args.img_dir, img_file)) seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) binded = img * 0.5 + seg",
"= img * 0.5 + seg * 0.5 mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) if __name__",
"# for img_file in mmcv.scandir(args.img_dir, suffix=img_suffix): # seg_file = img_file.replace(img_suffix, seg_map_suffix) # img",
"# img_suffix = '_leftImg8bit.png' # seg_map_suffix = '_gtFine_color.png' # mmcv.mkdir_or_exist(args.out_dir) # for img_file",
"seg_map_suffix) if not osp.exists(osp.join(args.gt_dir, seg_file)): continue img = mmcv.imread(osp.join(args.img_dir, img_file)) seg = mmcv.imread(osp.join(args.gt_dir,",
"img_file.replace(img_suffix, seg_map_suffix) if not osp.exists(osp.join(args.gt_dir, seg_file)): continue img = mmcv.imread(osp.join(args.img_dir, img_file)) seg =",
"seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) binded = img * 0.5 + seg * 0.5",
"parse_args() img_suffix = '.jpg' seg_map_suffix = '.png' mmcv.mkdir_or_exist(args.out_dir) for img_file in mmcv.scandir( args.img_dir,",
"def main(): # args = parse_args() # img_suffix = '_leftImg8bit.png' # seg_map_suffix =",
"config directory') args = parser.parse_args() return args # def main(): # args =",
"# img = mmcv.imread(osp.join(args.img_dir, img_file)) # seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) # binded =",
"# seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) # binded = img * 0.5 + seg",
"for img_file in mmcv.scandir( args.img_dir, suffix=img_suffix, recursive=True): seg_file = img_file.replace(img_suffix, seg_map_suffix) if not",
"to be published') parser.add_argument('img_dir', help='img config directory') parser.add_argument('gt_dir', help='gt config directory') parser.add_argument('out_dir', help='output",
"= img_file.replace(img_suffix, seg_map_suffix) # img = mmcv.imread(osp.join(args.img_dir, img_file)) # seg = mmcv.imread(osp.join(args.gt_dir, seg_file))",
"parser = argparse.ArgumentParser( description='Process a checkpoint to be published') parser.add_argument('img_dir', help='img config directory')",
"'_gtFine_color.png' # mmcv.mkdir_or_exist(args.out_dir) # for img_file in mmcv.scandir(args.img_dir, suffix=img_suffix): # seg_file = img_file.replace(img_suffix,",
"import os.path as osp import mmcv def parse_args(): parser = argparse.ArgumentParser( description='Process a",
"img * 0.5 + seg * 0.5 # mmcv.imwrite(binded, osp.join(args.out_dir, img_file)) def main():",
"= mmcv.imread(osp.join(args.img_dir, img_file)) seg = mmcv.imread(osp.join(args.gt_dir, seg_file)) binded = img * 0.5 +",
"def main(): args = parse_args() img_suffix = '.jpg' seg_map_suffix = '.png' mmcv.mkdir_or_exist(args.out_dir) for",
"= parse_args() # img_suffix = '_leftImg8bit.png' # seg_map_suffix = '_gtFine_color.png' # mmcv.mkdir_or_exist(args.out_dir) #",
"import argparse import os.path as osp import mmcv def parse_args(): parser = argparse.ArgumentParser(",
"import mmcv def parse_args(): parser = argparse.ArgumentParser( description='Process a checkpoint to be published')",
"published') parser.add_argument('img_dir', help='img config directory') parser.add_argument('gt_dir', help='gt config directory') parser.add_argument('out_dir', help='output config directory')",
"config directory') parser.add_argument('gt_dir', help='gt config directory') parser.add_argument('out_dir', help='output config directory') args = parser.parse_args()",
"args = parse_args() img_suffix = '.jpg' seg_map_suffix = '.png' mmcv.mkdir_or_exist(args.out_dir) for img_file in"
] |
[
"whether the string is ALL CAPS. For example: is_uppercase(\"c\") == False is_uppercase(\"C\") ==",
"inp) test.describe(\"Basic Tests\") gen_test_case(\"c\", False) gen_test_case(\"C\", True) gen_test_case(\"hello I AM DONALD\", False) gen_test_case(\"HELLO",
"method is_uppercase() to see whether the string is ALL CAPS. For example: is_uppercase(\"c\")",
"any string containing no letters at all is trivially considered to be in",
"CAPS. ''' def is_uppercase(inp): return inp.isupper() def gen_test_case(inp, res): test.assert_equals(is_uppercase(inp), res, inp) test.describe(\"Basic",
"is said to be in ALL CAPS whenever it does not contain any",
"to be in ALL CAPS whenever it does not contain any lowercase letter",
"the string is ALL CAPS. For example: is_uppercase(\"c\") == False is_uppercase(\"C\") == True",
"== True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True In this Kata, a string",
"from Test import Test, Test as test ''' Is the string uppercase? Task",
"<reponame>sirken/coding-practice from Test import Test, Test as test ''' Is the string uppercase?",
"True is_uppercase(\"hello I AM DONALD\") == False is_uppercase(\"HELLO I AM DONALD\") == True",
"== True In this Kata, a string is said to be in ALL",
"gen_test_case(inp, res): test.assert_equals(is_uppercase(inp), res, inp) test.describe(\"Basic Tests\") gen_test_case(\"c\", False) gen_test_case(\"C\", True) gen_test_case(\"hello I",
"is_uppercase() to see whether the string is ALL CAPS. For example: is_uppercase(\"c\") ==",
"said to be in ALL CAPS whenever it does not contain any lowercase",
"string is said to be in ALL CAPS whenever it does not contain",
"I AM DONALD\") == False is_uppercase(\"HELLO I AM DONALD\") == True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") ==",
"is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True In this Kata, a string is said",
"ALL CAPS. ''' def is_uppercase(inp): return inp.isupper() def gen_test_case(inp, res): test.assert_equals(is_uppercase(inp), res, inp)",
"no letters at all is trivially considered to be in ALL CAPS. '''",
"False) gen_test_case(\"C\", True) gen_test_case(\"hello I AM DONALD\", False) gen_test_case(\"HELLO I AM DONALD\", True)",
"is_uppercase(\"HELLO I AM DONALD\") == True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True In",
"False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True In this Kata, a string is said to be",
"a string is said to be in ALL CAPS whenever it does not",
"trivially considered to be in ALL CAPS. ''' def is_uppercase(inp): return inp.isupper() def",
"CAPS. For example: is_uppercase(\"c\") == False is_uppercase(\"C\") == True is_uppercase(\"hello I AM DONALD\")",
"contain any lowercase letter so any string containing no letters at all is",
"it does not contain any lowercase letter so any string containing no letters",
"as test ''' Is the string uppercase? Task Create a method is_uppercase() to",
"this Kata, a string is said to be in ALL CAPS whenever it",
"''' def is_uppercase(inp): return inp.isupper() def gen_test_case(inp, res): test.assert_equals(is_uppercase(inp), res, inp) test.describe(\"Basic Tests\")",
"not contain any lowercase letter so any string containing no letters at all",
"== True is_uppercase(\"hello I AM DONALD\") == False is_uppercase(\"HELLO I AM DONALD\") ==",
"CAPS whenever it does not contain any lowercase letter so any string containing",
"be in ALL CAPS whenever it does not contain any lowercase letter so",
"import Test, Test as test ''' Is the string uppercase? Task Create a",
"a method is_uppercase() to see whether the string is ALL CAPS. For example:",
"For example: is_uppercase(\"c\") == False is_uppercase(\"C\") == True is_uppercase(\"hello I AM DONALD\") ==",
"to see whether the string is ALL CAPS. For example: is_uppercase(\"c\") == False",
"False is_uppercase(\"C\") == True is_uppercase(\"hello I AM DONALD\") == False is_uppercase(\"HELLO I AM",
"does not contain any lowercase letter so any string containing no letters at",
"in ALL CAPS. ''' def is_uppercase(inp): return inp.isupper() def gen_test_case(inp, res): test.assert_equals(is_uppercase(inp), res,",
"example: is_uppercase(\"c\") == False is_uppercase(\"C\") == True is_uppercase(\"hello I AM DONALD\") == False",
"ALL CAPS. For example: is_uppercase(\"c\") == False is_uppercase(\"C\") == True is_uppercase(\"hello I AM",
"DONALD\") == False is_uppercase(\"HELLO I AM DONALD\") == True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\")",
"containing no letters at all is trivially considered to be in ALL CAPS.",
"== False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True In this Kata, a string is said to",
"in ALL CAPS whenever it does not contain any lowercase letter so any",
"Test import Test, Test as test ''' Is the string uppercase? Task Create",
"letter so any string containing no letters at all is trivially considered to",
"== False is_uppercase(\"C\") == True is_uppercase(\"hello I AM DONALD\") == False is_uppercase(\"HELLO I",
"In this Kata, a string is said to be in ALL CAPS whenever",
"see whether the string is ALL CAPS. For example: is_uppercase(\"c\") == False is_uppercase(\"C\")",
"is_uppercase(inp): return inp.isupper() def gen_test_case(inp, res): test.assert_equals(is_uppercase(inp), res, inp) test.describe(\"Basic Tests\") gen_test_case(\"c\", False)",
"any lowercase letter so any string containing no letters at all is trivially",
"test.describe(\"Basic Tests\") gen_test_case(\"c\", False) gen_test_case(\"C\", True) gen_test_case(\"hello I AM DONALD\", False) gen_test_case(\"HELLO I",
"res, inp) test.describe(\"Basic Tests\") gen_test_case(\"c\", False) gen_test_case(\"C\", True) gen_test_case(\"hello I AM DONALD\", False)",
"Tests\") gen_test_case(\"c\", False) gen_test_case(\"C\", True) gen_test_case(\"hello I AM DONALD\", False) gen_test_case(\"HELLO I AM",
"False is_uppercase(\"HELLO I AM DONALD\") == True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True",
"string is ALL CAPS. For example: is_uppercase(\"c\") == False is_uppercase(\"C\") == True is_uppercase(\"hello",
"test.assert_equals(is_uppercase(inp), res, inp) test.describe(\"Basic Tests\") gen_test_case(\"c\", False) gen_test_case(\"C\", True) gen_test_case(\"hello I AM DONALD\",",
"whenever it does not contain any lowercase letter so any string containing no",
"be in ALL CAPS. ''' def is_uppercase(inp): return inp.isupper() def gen_test_case(inp, res): test.assert_equals(is_uppercase(inp),",
"True In this Kata, a string is said to be in ALL CAPS",
"res): test.assert_equals(is_uppercase(inp), res, inp) test.describe(\"Basic Tests\") gen_test_case(\"c\", False) gen_test_case(\"C\", True) gen_test_case(\"hello I AM",
"AM DONALD\") == False is_uppercase(\"HELLO I AM DONALD\") == True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False",
"to be in ALL CAPS. ''' def is_uppercase(inp): return inp.isupper() def gen_test_case(inp, res):",
"string uppercase? Task Create a method is_uppercase() to see whether the string is",
"def is_uppercase(inp): return inp.isupper() def gen_test_case(inp, res): test.assert_equals(is_uppercase(inp), res, inp) test.describe(\"Basic Tests\") gen_test_case(\"c\",",
"Is the string uppercase? Task Create a method is_uppercase() to see whether the",
"string containing no letters at all is trivially considered to be in ALL",
"return inp.isupper() def gen_test_case(inp, res): test.assert_equals(is_uppercase(inp), res, inp) test.describe(\"Basic Tests\") gen_test_case(\"c\", False) gen_test_case(\"C\",",
"def gen_test_case(inp, res): test.assert_equals(is_uppercase(inp), res, inp) test.describe(\"Basic Tests\") gen_test_case(\"c\", False) gen_test_case(\"C\", True) gen_test_case(\"hello",
"all is trivially considered to be in ALL CAPS. ''' def is_uppercase(inp): return",
"at all is trivially considered to be in ALL CAPS. ''' def is_uppercase(inp):",
"Create a method is_uppercase() to see whether the string is ALL CAPS. For",
"Kata, a string is said to be in ALL CAPS whenever it does",
"Test, Test as test ''' Is the string uppercase? Task Create a method",
"is_uppercase(\"C\") == True is_uppercase(\"hello I AM DONALD\") == False is_uppercase(\"HELLO I AM DONALD\")",
"test ''' Is the string uppercase? Task Create a method is_uppercase() to see",
"the string uppercase? Task Create a method is_uppercase() to see whether the string",
"== False is_uppercase(\"HELLO I AM DONALD\") == True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") ==",
"is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True In this Kata, a string is said to be in",
"ALL CAPS whenever it does not contain any lowercase letter so any string",
"lowercase letter so any string containing no letters at all is trivially considered",
"so any string containing no letters at all is trivially considered to be",
"True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True In this Kata, a string is",
"gen_test_case(\"c\", False) gen_test_case(\"C\", True) gen_test_case(\"hello I AM DONALD\", False) gen_test_case(\"HELLO I AM DONALD\",",
"letters at all is trivially considered to be in ALL CAPS. ''' def",
"I AM DONALD\") == True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True In this",
"Task Create a method is_uppercase() to see whether the string is ALL CAPS.",
"AM DONALD\") == True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True In this Kata,",
"''' Is the string uppercase? Task Create a method is_uppercase() to see whether",
"uppercase? Task Create a method is_uppercase() to see whether the string is ALL",
"DONALD\") == True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False is_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True In this Kata, a",
"is trivially considered to be in ALL CAPS. ''' def is_uppercase(inp): return inp.isupper()",
"is_uppercase(\"hello I AM DONALD\") == False is_uppercase(\"HELLO I AM DONALD\") == True is_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\")",
"is_uppercase(\"c\") == False is_uppercase(\"C\") == True is_uppercase(\"hello I AM DONALD\") == False is_uppercase(\"HELLO",
"is ALL CAPS. For example: is_uppercase(\"c\") == False is_uppercase(\"C\") == True is_uppercase(\"hello I",
"Test as test ''' Is the string uppercase? Task Create a method is_uppercase()",
"inp.isupper() def gen_test_case(inp, res): test.assert_equals(is_uppercase(inp), res, inp) test.describe(\"Basic Tests\") gen_test_case(\"c\", False) gen_test_case(\"C\", True)",
"considered to be in ALL CAPS. ''' def is_uppercase(inp): return inp.isupper() def gen_test_case(inp,"
] |
[
"'.gif': 'image/gif', '.txt': 'text/plain', } def to_mimetype(ext): if ext.lower() in MIME_TYPES: return MIME_TYPES[ext.lower()]",
"'image/gif', '.txt': 'text/plain', } def to_mimetype(ext): if ext.lower() in MIME_TYPES: return MIME_TYPES[ext.lower()] else:",
"'.png': 'image/png', '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', '.gif': 'image/gif', '.txt': 'text/plain', } def to_mimetype(ext):",
"'text/plain', } def to_mimetype(ext): if ext.lower() in MIME_TYPES: return MIME_TYPES[ext.lower()] else: return \"application/octet-stream\"",
"'.txt': 'text/plain', } def to_mimetype(ext): if ext.lower() in MIME_TYPES: return MIME_TYPES[ext.lower()] else: return",
"{ '.png': 'image/png', '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', '.gif': 'image/gif', '.txt': 'text/plain', } def",
"MIME_TYPES = { '.png': 'image/png', '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', '.gif': 'image/gif', '.txt': 'text/plain',",
"'image/png', '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', '.gif': 'image/gif', '.txt': 'text/plain', } def to_mimetype(ext): if",
"'.jpeg': 'image/jpeg', '.gif': 'image/gif', '.txt': 'text/plain', } def to_mimetype(ext): if ext.lower() in MIME_TYPES:",
"'image/jpeg', '.gif': 'image/gif', '.txt': 'text/plain', } def to_mimetype(ext): if ext.lower() in MIME_TYPES: return",
"'.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', '.gif': 'image/gif', '.txt': 'text/plain', } def to_mimetype(ext): if ext.lower()",
"<gh_stars>10-100 MIME_TYPES = { '.png': 'image/png', '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', '.gif': 'image/gif', '.txt':",
"'image/jpeg', '.jpeg': 'image/jpeg', '.gif': 'image/gif', '.txt': 'text/plain', } def to_mimetype(ext): if ext.lower() in",
"= { '.png': 'image/png', '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', '.gif': 'image/gif', '.txt': 'text/plain', }"
] |
[
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"= ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where()) _SSL_CONTEXT.check_hostname = False class DomainIpValidator: async def get_cert(self, domain: str,",
"KIND, either express or implied. # See the License for the specific language",
"= \"UNKNOWN (%s)\" % repr(e) print(\"IP {} is {}\".format(ip_address, result_str)) return 0 if",
"Unless required by applicable law or agreed to in writing, software # distributed",
"0 if all_good else 1 if __name__ == \"__main__\": parser = argparse.ArgumentParser( \"Checks",
"You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"# See the License for the specific language governing permissions and # limitations",
"{} is {}\".format(ip_address, result_str)) return 0 if all_good else 1 if __name__ ==",
"License. # You may obtain a copy of the License at # #",
"for ip_address in args.ip_address: try: asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout)) result_str = \"VALID\" except",
"= DomainIpValidator() all_good = True for ip_address in args.ip_address: try: asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain, str(ip_address),",
"the validation fails. \"\"\" cert = await self.get_cert(domain, ip, timeout) if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert)))",
"ip_address in args.ip_address: try: asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout)) result_str = \"VALID\" except (ssl.CertificateError,",
"__name__ == \"__main__\": parser = argparse.ArgumentParser( \"Checks if the given IP addresses are",
"nargs=\"+\", help=\"The IP address to query\") parser.add_argument(\"--debug\", action=\"store_true\") parser.add_argument(\"--timeout\", type=float, default=2.0, help=\"Timeout in",
"import argparse import asyncio import certifi import ipaddress import logging import pprint import",
"server_hostname=domain), timeout) transport.close() return transport.get_extra_info(\"peercert\") async def validate_ip(self, domain: str, ip: str, timeout=2.0):",
"import ipaddress import logging import pprint import ssl import sys _SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH,",
"logging import pprint import ssl import sys _SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where()) _SSL_CONTEXT.check_hostname =",
"the given IP addresses are valid for the domain\") parser.add_argument(\"domain\", type=str, help=\"The domain",
"type=ipaddress.ip_address, nargs=\"+\", help=\"The IP address to query\") parser.add_argument(\"--debug\", action=\"store_true\") parser.add_argument(\"--timeout\", type=float, default=2.0, help=\"Timeout",
"law or agreed to in writing, software # distributed under the License is",
"class DomainIpValidator: async def get_cert(self, domain: str, ip: str, timeout=2.0): ip = str(ip)",
"the License for the specific language governing permissions and # limitations under the",
"if the IP is valid for the domain. Raises exception if the validation",
"validator = DomainIpValidator() all_good = True for ip_address in args.ip_address: try: asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain,",
"str(ip_address), timeout=args.timeout)) result_str = \"VALID\" except (ssl.CertificateError, ConnectionRefusedError, OSError, asyncio.TimeoutError) as e: all_good",
"except (ssl.CertificateError, ConnectionRefusedError, OSError, asyncio.TimeoutError) as e: all_good = False result_str = \"UNKNOWN",
"compliance with the License. # You may obtain a copy of the License",
"validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout)) result_str = \"VALID\" except (ssl.CertificateError, ConnectionRefusedError, OSError, asyncio.TimeoutError) as e:",
"False class DomainIpValidator: async def get_cert(self, domain: str, ip: str, timeout=2.0): ip =",
"\"Checks if the given IP addresses are valid for the domain\") parser.add_argument(\"domain\", type=str,",
"permissions and # limitations under the License. import argparse import asyncio import certifi",
"is {}\".format(ip_address, result_str)) return 0 if all_good else 1 if __name__ == \"__main__\":",
"exception if the validation fails. \"\"\" cert = await self.get_cert(domain, ip, timeout) if",
"domain: str, ip: str, timeout=2.0): ip = str(ip) transport, _proto = await asyncio.wait_for(asyncio.get_event_loop().create_connection(",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"import asyncio import certifi import ipaddress import logging import pprint import ssl import",
"this file except in compliance with the License. # You may obtain a",
"ip: str, timeout=2.0): ip = str(ip) transport, _proto = await asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol, host=ip,",
"self.get_cert(domain, ip, timeout) if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert, domain) def main(args): logging.basicConfig(level=logging.DEBUG if args.debug",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"validate_ip(self, domain: str, ip: str, timeout=2.0): \"\"\" Returns successfully if the IP is",
"await asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol, host=ip, port=443, ssl=_SSL_CONTEXT, server_hostname=domain), timeout) transport.close() return transport.get_extra_info(\"peercert\") async def",
"ip, timeout) if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert, domain) def main(args): logging.basicConfig(level=logging.DEBUG if args.debug else",
"you may not use this file except in compliance with the License. #",
"for the specific language governing permissions and # limitations under the License. import",
"the License. import argparse import asyncio import certifi import ipaddress import logging import",
"import pprint import ssl import sys _SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where()) _SSL_CONTEXT.check_hostname = False",
"str, ip: str, timeout=2.0): ip = str(ip) transport, _proto = await asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol,",
"under the License. import argparse import asyncio import certifi import ipaddress import logging",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"timeout=2.0): \"\"\" Returns successfully if the IP is valid for the domain. Raises",
"async def get_cert(self, domain: str, ip: str, timeout=2.0): ip = str(ip) transport, _proto",
"timeout=args.timeout)) result_str = \"VALID\" except (ssl.CertificateError, ConnectionRefusedError, OSError, asyncio.TimeoutError) as e: all_good =",
"host=ip, port=443, ssl=_SSL_CONTEXT, server_hostname=domain), timeout) transport.close() return transport.get_extra_info(\"peercert\") async def validate_ip(self, domain: str,",
"str, timeout=2.0): \"\"\" Returns successfully if the IP is valid for the domain.",
"validation fails. \"\"\" cert = await self.get_cert(domain, ip, timeout) if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert,",
"argparse.ArgumentParser( \"Checks if the given IP addresses are valid for the domain\") parser.add_argument(\"domain\",",
"_SSL_CONTEXT.check_hostname = False class DomainIpValidator: async def get_cert(self, domain: str, ip: str, timeout=2.0):",
"governing permissions and # limitations under the License. import argparse import asyncio import",
"in args.ip_address: try: asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout)) result_str = \"VALID\" except (ssl.CertificateError, ConnectionRefusedError,",
"ANY KIND, either express or implied. # See the License for the specific",
"import certifi import ipaddress import logging import pprint import ssl import sys _SSL_CONTEXT",
"specific language governing permissions and # limitations under the License. import argparse import",
"IP address to query\") parser.add_argument(\"--debug\", action=\"store_true\") parser.add_argument(\"--timeout\", type=float, default=2.0, help=\"Timeout in seconds for",
"ConnectionRefusedError, OSError, asyncio.TimeoutError) as e: all_good = False result_str = \"UNKNOWN (%s)\" %",
"in compliance with the License. # You may obtain a copy of the",
"str, timeout=2.0): ip = str(ip) transport, _proto = await asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol, host=ip, port=443,",
"all_good else 1 if __name__ == \"__main__\": parser = argparse.ArgumentParser( \"Checks if the",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"successfully if the IP is valid for the domain. Raises exception if the",
"{}\".format(ip_address, result_str)) return 0 if all_good else 1 if __name__ == \"__main__\": parser",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"domain: str, ip: str, timeout=2.0): \"\"\" Returns successfully if the IP is valid",
"use this file except in compliance with the License. # You may obtain",
"Returns successfully if the IP is valid for the domain. Raises exception if",
"valid for the domain. Raises exception if the validation fails. \"\"\" cert =",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"timeout) if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert, domain) def main(args): logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)",
"for\") parser.add_argument(\"ip_address\", type=ipaddress.ip_address, nargs=\"+\", help=\"The IP address to query\") parser.add_argument(\"--debug\", action=\"store_true\") parser.add_argument(\"--timeout\", type=float,",
"not use this file except in compliance with the License. # You may",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"the IPs for\") parser.add_argument(\"ip_address\", type=ipaddress.ip_address, nargs=\"+\", help=\"The IP address to query\") parser.add_argument(\"--debug\", action=\"store_true\")",
"are valid for the domain\") parser.add_argument(\"domain\", type=str, help=\"The domain to validate the IPs",
"return transport.get_extra_info(\"peercert\") async def validate_ip(self, domain: str, ip: str, timeout=2.0): \"\"\" Returns successfully",
"See the License for the specific language governing permissions and # limitations under",
"get_cert(self, domain: str, ip: str, timeout=2.0): ip = str(ip) transport, _proto = await",
"import sys _SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where()) _SSL_CONTEXT.check_hostname = False class DomainIpValidator: async def",
"result_str = \"VALID\" except (ssl.CertificateError, ConnectionRefusedError, OSError, asyncio.TimeoutError) as e: all_good = False",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"(%s)\" % repr(e) print(\"IP {} is {}\".format(ip_address, result_str)) return 0 if all_good else",
"cert = await self.get_cert(domain, ip, timeout) if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert, domain) def main(args):",
"License, Version 2.0 (the \"License\"); # you may not use this file except",
"parser = argparse.ArgumentParser( \"Checks if the given IP addresses are valid for the",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"License. import argparse import asyncio import certifi import ipaddress import logging import pprint",
"def main(args): logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) validator = DomainIpValidator() all_good = True",
"_proto = await asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol, host=ip, port=443, ssl=_SSL_CONTEXT, server_hostname=domain), timeout) transport.close() return transport.get_extra_info(\"peercert\")",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"type=str, help=\"The domain to validate the IPs for\") parser.add_argument(\"ip_address\", type=ipaddress.ip_address, nargs=\"+\", help=\"The IP",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where()) _SSL_CONTEXT.check_hostname = False class DomainIpValidator: async def get_cert(self, domain: str, ip:",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"given IP addresses are valid for the domain\") parser.add_argument(\"domain\", type=str, help=\"The domain to",
"ip: str, timeout=2.0): \"\"\" Returns successfully if the IP is valid for the",
"2018 Jigsaw Operations LLC # # Licensed under the Apache License, Version 2.0",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"str(ip) transport, _proto = await asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol, host=ip, port=443, ssl=_SSL_CONTEXT, server_hostname=domain), timeout) transport.close()",
"IPs for\") parser.add_argument(\"ip_address\", type=ipaddress.ip_address, nargs=\"+\", help=\"The IP address to query\") parser.add_argument(\"--debug\", action=\"store_true\") parser.add_argument(\"--timeout\",",
"asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout)) result_str = \"VALID\" except (ssl.CertificateError, ConnectionRefusedError, OSError, asyncio.TimeoutError) as",
"if __name__ == \"__main__\": parser = argparse.ArgumentParser( \"Checks if the given IP addresses",
"OF ANY KIND, either express or implied. # See the License for the",
"transport, _proto = await asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol, host=ip, port=443, ssl=_SSL_CONTEXT, server_hostname=domain), timeout) transport.close() return",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"main(args): logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) validator = DomainIpValidator() all_good = True for",
"= False class DomainIpValidator: async def get_cert(self, domain: str, ip: str, timeout=2.0): ip",
"asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol, host=ip, port=443, ssl=_SSL_CONTEXT, server_hostname=domain), timeout) transport.close() return transport.get_extra_info(\"peercert\") async def validate_ip(self,",
"repr(e) print(\"IP {} is {}\".format(ip_address, result_str)) return 0 if all_good else 1 if",
"# you may not use this file except in compliance with the License.",
"= argparse.ArgumentParser( \"Checks if the given IP addresses are valid for the domain\")",
"# Copyright 2018 Jigsaw Operations LLC # # Licensed under the Apache License,",
"sys _SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where()) _SSL_CONTEXT.check_hostname = False class DomainIpValidator: async def get_cert(self,",
"DomainIpValidator: async def get_cert(self, domain: str, ip: str, timeout=2.0): ip = str(ip) transport,",
"e: all_good = False result_str = \"UNKNOWN (%s)\" % repr(e) print(\"IP {} is",
"asyncio.TimeoutError) as e: all_good = False result_str = \"UNKNOWN (%s)\" % repr(e) print(\"IP",
"agreed to in writing, software # distributed under the License is distributed on",
"await self.get_cert(domain, ip, timeout) if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert, domain) def main(args): logging.basicConfig(level=logging.DEBUG if",
"if args.debug else logging.INFO) validator = DomainIpValidator() all_good = True for ip_address in",
"for the domain\") parser.add_argument(\"domain\", type=str, help=\"The domain to validate the IPs for\") parser.add_argument(\"ip_address\",",
"import ssl import sys _SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where()) _SSL_CONTEXT.check_hostname = False class DomainIpValidator:",
"all_good = False result_str = \"UNKNOWN (%s)\" % repr(e) print(\"IP {} is {}\".format(ip_address,",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"IP is valid for the domain. Raises exception if the validation fails. \"\"\"",
"if all_good else 1 if __name__ == \"__main__\": parser = argparse.ArgumentParser( \"Checks if",
"if the given IP addresses are valid for the domain\") parser.add_argument(\"domain\", type=str, help=\"The",
"OSError, asyncio.TimeoutError) as e: all_good = False result_str = \"UNKNOWN (%s)\" % repr(e)",
"(the \"License\"); # you may not use this file except in compliance with",
"timeout=2.0): ip = str(ip) transport, _proto = await asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol, host=ip, port=443, ssl=_SSL_CONTEXT,",
"asyncio import certifi import ipaddress import logging import pprint import ssl import sys",
"ip = str(ip) transport, _proto = await asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol, host=ip, port=443, ssl=_SSL_CONTEXT, server_hostname=domain),",
"Raises exception if the validation fails. \"\"\" cert = await self.get_cert(domain, ip, timeout)",
"language governing permissions and # limitations under the License. import argparse import asyncio",
"\"__main__\": parser = argparse.ArgumentParser( \"Checks if the given IP addresses are valid for",
"# # Unless required by applicable law or agreed to in writing, software",
"express or implied. # See the License for the specific language governing permissions",
"the specific language governing permissions and # limitations under the License. import argparse",
"logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) validator = DomainIpValidator() all_good = True for ip_address",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"# Unless required by applicable law or agreed to in writing, software #",
"except in compliance with the License. # You may obtain a copy of",
"by applicable law or agreed to in writing, software # distributed under the",
"validate the IPs for\") parser.add_argument(\"ip_address\", type=ipaddress.ip_address, nargs=\"+\", help=\"The IP address to query\") parser.add_argument(\"--debug\",",
"LLC # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"= False result_str = \"UNKNOWN (%s)\" % repr(e) print(\"IP {} is {}\".format(ip_address, result_str))",
"asyncio.Protocol, host=ip, port=443, ssl=_SSL_CONTEXT, server_hostname=domain), timeout) transport.close() return transport.get_extra_info(\"peercert\") async def validate_ip(self, domain:",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"port=443, ssl=_SSL_CONTEXT, server_hostname=domain), timeout) transport.close() return transport.get_extra_info(\"peercert\") async def validate_ip(self, domain: str, ip:",
"domain to validate the IPs for\") parser.add_argument(\"ip_address\", type=ipaddress.ip_address, nargs=\"+\", help=\"The IP address to",
"= str(ip) transport, _proto = await asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol, host=ip, port=443, ssl=_SSL_CONTEXT, server_hostname=domain), timeout)",
"either express or implied. # See the License for the specific language governing",
"= \"VALID\" except (ssl.CertificateError, ConnectionRefusedError, OSError, asyncio.TimeoutError) as e: all_good = False result_str",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"addresses are valid for the domain\") parser.add_argument(\"domain\", type=str, help=\"The domain to validate the",
"certifi import ipaddress import logging import pprint import ssl import sys _SSL_CONTEXT =",
"# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"pprint import ssl import sys _SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where()) _SSL_CONTEXT.check_hostname = False class",
"logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert, domain) def main(args): logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) validator = DomainIpValidator()",
"str, ip: str, timeout=2.0): \"\"\" Returns successfully if the IP is valid for",
"1 if __name__ == \"__main__\": parser = argparse.ArgumentParser( \"Checks if the given IP",
"address to query\") parser.add_argument(\"--debug\", action=\"store_true\") parser.add_argument(\"--timeout\", type=float, default=2.0, help=\"Timeout in seconds for getting",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"= await asyncio.wait_for(asyncio.get_event_loop().create_connection( asyncio.Protocol, host=ip, port=443, ssl=_SSL_CONTEXT, server_hostname=domain), timeout) transport.close() return transport.get_extra_info(\"peercert\") async",
"True for ip_address in args.ip_address: try: asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout)) result_str = \"VALID\"",
"help=\"The IP address to query\") parser.add_argument(\"--debug\", action=\"store_true\") parser.add_argument(\"--timeout\", type=float, default=2.0, help=\"Timeout in seconds",
"file except in compliance with the License. # You may obtain a copy",
"to validate the IPs for\") parser.add_argument(\"ip_address\", type=ipaddress.ip_address, nargs=\"+\", help=\"The IP address to query\")",
"domain) def main(args): logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) validator = DomainIpValidator() all_good =",
"result_str)) return 0 if all_good else 1 if __name__ == \"__main__\": parser =",
"parser.add_argument(\"ip_address\", type=ipaddress.ip_address, nargs=\"+\", help=\"The IP address to query\") parser.add_argument(\"--debug\", action=\"store_true\") parser.add_argument(\"--timeout\", type=float, default=2.0,",
"query\") parser.add_argument(\"--debug\", action=\"store_true\") parser.add_argument(\"--timeout\", type=float, default=2.0, help=\"Timeout in seconds for getting the certificate\")",
"try: asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout)) result_str = \"VALID\" except (ssl.CertificateError, ConnectionRefusedError, OSError, asyncio.TimeoutError)",
"else logging.INFO) validator = DomainIpValidator() all_good = True for ip_address in args.ip_address: try:",
"is valid for the domain. Raises exception if the validation fails. \"\"\" cert",
"\"VALID\" except (ssl.CertificateError, ConnectionRefusedError, OSError, asyncio.TimeoutError) as e: all_good = False result_str =",
"under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"parser.add_argument(\"domain\", type=str, help=\"The domain to validate the IPs for\") parser.add_argument(\"ip_address\", type=ipaddress.ip_address, nargs=\"+\", help=\"The",
"License for the specific language governing permissions and # limitations under the License.",
"fails. \"\"\" cert = await self.get_cert(domain, ip, timeout) if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert, domain)",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"the IP is valid for the domain. Raises exception if the validation fails.",
"(ssl.CertificateError, ConnectionRefusedError, OSError, asyncio.TimeoutError) as e: all_good = False result_str = \"UNKNOWN (%s)\"",
"\"\"\" cert = await self.get_cert(domain, ip, timeout) if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert, domain) def",
"the License. # You may obtain a copy of the License at #",
"def validate_ip(self, domain: str, ip: str, timeout=2.0): \"\"\" Returns successfully if the IP",
"if the validation fails. \"\"\" cert = await self.get_cert(domain, ip, timeout) if logging.getLogger().isEnabledFor(logging.DEBUG):",
"to in writing, software # distributed under the License is distributed on an",
"print(\"IP {} is {}\".format(ip_address, result_str)) return 0 if all_good else 1 if __name__",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"= True for ip_address in args.ip_address: try: asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout)) result_str =",
"cafile=certifi.where()) _SSL_CONTEXT.check_hostname = False class DomainIpValidator: async def get_cert(self, domain: str, ip: str,",
"ssl=_SSL_CONTEXT, server_hostname=domain), timeout) transport.close() return transport.get_extra_info(\"peercert\") async def validate_ip(self, domain: str, ip: str,",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"implied. # See the License for the specific language governing permissions and #",
"Operations LLC # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"import logging import pprint import ssl import sys _SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where()) _SSL_CONTEXT.check_hostname",
"\"UNKNOWN (%s)\" % repr(e) print(\"IP {} is {}\".format(ip_address, result_str)) return 0 if all_good",
"\"License\"); # you may not use this file except in compliance with the",
"help=\"The domain to validate the IPs for\") parser.add_argument(\"ip_address\", type=ipaddress.ip_address, nargs=\"+\", help=\"The IP address",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"# limitations under the License. import argparse import asyncio import certifi import ipaddress",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"and # limitations under the License. import argparse import asyncio import certifi import",
"required by applicable law or agreed to in writing, software # distributed under",
"async def validate_ip(self, domain: str, ip: str, timeout=2.0): \"\"\" Returns successfully if the",
"return 0 if all_good else 1 if __name__ == \"__main__\": parser = argparse.ArgumentParser(",
"limitations under the License. import argparse import asyncio import certifi import ipaddress import",
"def get_cert(self, domain: str, ip: str, timeout=2.0): ip = str(ip) transport, _proto =",
"% repr(e) print(\"IP {} is {}\".format(ip_address, result_str)) return 0 if all_good else 1",
"logging.INFO) validator = DomainIpValidator() all_good = True for ip_address in args.ip_address: try: asyncio.get_event_loop().run_until_complete(",
"applicable law or agreed to in writing, software # distributed under the License",
"argparse import asyncio import certifi import ipaddress import logging import pprint import ssl",
"Jigsaw Operations LLC # # Licensed under the Apache License, Version 2.0 (the",
"the domain. Raises exception if the validation fails. \"\"\" cert = await self.get_cert(domain,",
"args.debug else logging.INFO) validator = DomainIpValidator() all_good = True for ip_address in args.ip_address:",
"ipaddress import logging import pprint import ssl import sys _SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where())",
"args.ip_address: try: asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout)) result_str = \"VALID\" except (ssl.CertificateError, ConnectionRefusedError, OSError,",
"\"\"\" Returns successfully if the IP is valid for the domain. Raises exception",
"valid for the domain\") parser.add_argument(\"domain\", type=str, help=\"The domain to validate the IPs for\")",
"ssl.match_hostname(cert, domain) def main(args): logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) validator = DomainIpValidator() all_good",
"all_good = True for ip_address in args.ip_address: try: asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout)) result_str",
"parser.add_argument(\"--debug\", action=\"store_true\") parser.add_argument(\"--timeout\", type=float, default=2.0, help=\"Timeout in seconds for getting the certificate\") sys.exit(main(parser.parse_args()))",
"else 1 if __name__ == \"__main__\": parser = argparse.ArgumentParser( \"Checks if the given",
"timeout) transport.close() return transport.get_extra_info(\"peercert\") async def validate_ip(self, domain: str, ip: str, timeout=2.0): \"\"\"",
"or agreed to in writing, software # distributed under the License is distributed",
"for the domain. Raises exception if the validation fails. \"\"\" cert = await",
"False result_str = \"UNKNOWN (%s)\" % repr(e) print(\"IP {} is {}\".format(ip_address, result_str)) return",
"or implied. # See the License for the specific language governing permissions and",
"_SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where()) _SSL_CONTEXT.check_hostname = False class DomainIpValidator: async def get_cert(self, domain:",
"transport.get_extra_info(\"peercert\") async def validate_ip(self, domain: str, ip: str, timeout=2.0): \"\"\" Returns successfully if",
"if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert, domain) def main(args): logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) validator",
"transport.close() return transport.get_extra_info(\"peercert\") async def validate_ip(self, domain: str, ip: str, timeout=2.0): \"\"\" Returns",
"to query\") parser.add_argument(\"--debug\", action=\"store_true\") parser.add_argument(\"--timeout\", type=float, default=2.0, help=\"Timeout in seconds for getting the",
"<filename>netanalysis/tls/domain_ip_validator.py # Copyright 2018 Jigsaw Operations LLC # # Licensed under the Apache",
"Copyright 2018 Jigsaw Operations LLC # # Licensed under the Apache License, Version",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"CONDITIONS OF ANY KIND, either express or implied. # See the License for",
"logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert, domain) def main(args): logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) validator =",
"== \"__main__\": parser = argparse.ArgumentParser( \"Checks if the given IP addresses are valid",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"the domain\") parser.add_argument(\"domain\", type=str, help=\"The domain to validate the IPs for\") parser.add_argument(\"ip_address\", type=ipaddress.ip_address,",
"as e: all_good = False result_str = \"UNKNOWN (%s)\" % repr(e) print(\"IP {}",
"result_str = \"UNKNOWN (%s)\" % repr(e) print(\"IP {} is {}\".format(ip_address, result_str)) return 0",
"ssl import sys _SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where()) _SSL_CONTEXT.check_hostname = False class DomainIpValidator: async",
"with the License. # You may obtain a copy of the License at",
"domain\") parser.add_argument(\"domain\", type=str, help=\"The domain to validate the IPs for\") parser.add_argument(\"ip_address\", type=ipaddress.ip_address, nargs=\"+\",",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"domain. Raises exception if the validation fails. \"\"\" cert = await self.get_cert(domain, ip,",
"in writing, software # distributed under the License is distributed on an \"AS",
"= await self.get_cert(domain, ip, timeout) if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug(\"Certificate:\\n{}\".format(pprint.pformat(cert))) ssl.match_hostname(cert, domain) def main(args): logging.basicConfig(level=logging.DEBUG",
"IP addresses are valid for the domain\") parser.add_argument(\"domain\", type=str, help=\"The domain to validate",
"DomainIpValidator() all_good = True for ip_address in args.ip_address: try: asyncio.get_event_loop().run_until_complete( validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout))",
"under the Apache License, Version 2.0 (the \"License\"); # you may not use"
] |
[
"DROP INDEX IF EXISTS \"worker_cache_table_expires\"; CREATE INDEX \"worker_cache_table_expires\" ON \"worker_cache_table\" (\"expires\"); \"\"\" )",
"# Generated by Django 2.2.12 on 2020-05-05 17:07 from django.db import migrations class",
"Migration(migrations.Migration): dependencies = [(\"reporting_common\", \"0021_delete_reportcolumnmap\")] operations = [ migrations.RunSQL( \"\"\" DROP TABLE IF",
"INDEX IF EXISTS \"worker_cache_table_expires\"; CREATE INDEX \"worker_cache_table_expires\" ON \"worker_cache_table\" (\"expires\"); \"\"\" ) ]",
"on 2020-05-05 17:07 from django.db import migrations class Migration(migrations.Migration): dependencies = [(\"reporting_common\", \"0021_delete_reportcolumnmap\")]",
"text NOT NULL, \"expires\" timestamp with time zone NOT NULL ); DROP INDEX",
"from django.db import migrations class Migration(migrations.Migration): dependencies = [(\"reporting_common\", \"0021_delete_reportcolumnmap\")] operations = [",
"migrations class Migration(migrations.Migration): dependencies = [(\"reporting_common\", \"0021_delete_reportcolumnmap\")] operations = [ migrations.RunSQL( \"\"\" DROP",
"TABLE \"worker_cache_table\" ( \"cache_key\" varchar(255) NOT NULL PRIMARY KEY, \"value\" text NOT NULL,",
"NOT NULL PRIMARY KEY, \"value\" text NOT NULL, \"expires\" timestamp with time zone",
"TABLE IF EXISTS \"worker_cache_table\"; CREATE TABLE \"worker_cache_table\" ( \"cache_key\" varchar(255) NOT NULL PRIMARY",
"Generated by Django 2.2.12 on 2020-05-05 17:07 from django.db import migrations class Migration(migrations.Migration):",
"time zone NOT NULL ); DROP INDEX IF EXISTS \"worker_cache_table_expires\"; CREATE INDEX \"worker_cache_table_expires\"",
"CREATE TABLE \"worker_cache_table\" ( \"cache_key\" varchar(255) NOT NULL PRIMARY KEY, \"value\" text NOT",
"IF EXISTS \"worker_cache_table\"; CREATE TABLE \"worker_cache_table\" ( \"cache_key\" varchar(255) NOT NULL PRIMARY KEY,",
"\"0021_delete_reportcolumnmap\")] operations = [ migrations.RunSQL( \"\"\" DROP TABLE IF EXISTS \"worker_cache_table\"; CREATE TABLE",
"2.2.12 on 2020-05-05 17:07 from django.db import migrations class Migration(migrations.Migration): dependencies = [(\"reporting_common\",",
"); DROP INDEX IF EXISTS \"worker_cache_table_expires\"; CREATE INDEX \"worker_cache_table_expires\" ON \"worker_cache_table\" (\"expires\"); \"\"\"",
"NULL PRIMARY KEY, \"value\" text NOT NULL, \"expires\" timestamp with time zone NOT",
"django.db import migrations class Migration(migrations.Migration): dependencies = [(\"reporting_common\", \"0021_delete_reportcolumnmap\")] operations = [ migrations.RunSQL(",
"class Migration(migrations.Migration): dependencies = [(\"reporting_common\", \"0021_delete_reportcolumnmap\")] operations = [ migrations.RunSQL( \"\"\" DROP TABLE",
"PRIMARY KEY, \"value\" text NOT NULL, \"expires\" timestamp with time zone NOT NULL",
"varchar(255) NOT NULL PRIMARY KEY, \"value\" text NOT NULL, \"expires\" timestamp with time",
"Django 2.2.12 on 2020-05-05 17:07 from django.db import migrations class Migration(migrations.Migration): dependencies =",
"<filename>koku/reporting_common/migrations/0022_auto_20200505_1707.py # Generated by Django 2.2.12 on 2020-05-05 17:07 from django.db import migrations",
"KEY, \"value\" text NOT NULL, \"expires\" timestamp with time zone NOT NULL );",
"\"value\" text NOT NULL, \"expires\" timestamp with time zone NOT NULL ); DROP",
"zone NOT NULL ); DROP INDEX IF EXISTS \"worker_cache_table_expires\"; CREATE INDEX \"worker_cache_table_expires\" ON",
"import migrations class Migration(migrations.Migration): dependencies = [(\"reporting_common\", \"0021_delete_reportcolumnmap\")] operations = [ migrations.RunSQL( \"\"\"",
"\"worker_cache_table\" ( \"cache_key\" varchar(255) NOT NULL PRIMARY KEY, \"value\" text NOT NULL, \"expires\"",
"with time zone NOT NULL ); DROP INDEX IF EXISTS \"worker_cache_table_expires\"; CREATE INDEX",
"17:07 from django.db import migrations class Migration(migrations.Migration): dependencies = [(\"reporting_common\", \"0021_delete_reportcolumnmap\")] operations =",
"operations = [ migrations.RunSQL( \"\"\" DROP TABLE IF EXISTS \"worker_cache_table\"; CREATE TABLE \"worker_cache_table\"",
"\"\"\" DROP TABLE IF EXISTS \"worker_cache_table\"; CREATE TABLE \"worker_cache_table\" ( \"cache_key\" varchar(255) NOT",
"DROP TABLE IF EXISTS \"worker_cache_table\"; CREATE TABLE \"worker_cache_table\" ( \"cache_key\" varchar(255) NOT NULL",
"\"worker_cache_table\"; CREATE TABLE \"worker_cache_table\" ( \"cache_key\" varchar(255) NOT NULL PRIMARY KEY, \"value\" text",
"[(\"reporting_common\", \"0021_delete_reportcolumnmap\")] operations = [ migrations.RunSQL( \"\"\" DROP TABLE IF EXISTS \"worker_cache_table\"; CREATE",
"NOT NULL ); DROP INDEX IF EXISTS \"worker_cache_table_expires\"; CREATE INDEX \"worker_cache_table_expires\" ON \"worker_cache_table\"",
"2020-05-05 17:07 from django.db import migrations class Migration(migrations.Migration): dependencies = [(\"reporting_common\", \"0021_delete_reportcolumnmap\")] operations",
"\"cache_key\" varchar(255) NOT NULL PRIMARY KEY, \"value\" text NOT NULL, \"expires\" timestamp with",
"= [(\"reporting_common\", \"0021_delete_reportcolumnmap\")] operations = [ migrations.RunSQL( \"\"\" DROP TABLE IF EXISTS \"worker_cache_table\";",
"NOT NULL, \"expires\" timestamp with time zone NOT NULL ); DROP INDEX IF",
"migrations.RunSQL( \"\"\" DROP TABLE IF EXISTS \"worker_cache_table\"; CREATE TABLE \"worker_cache_table\" ( \"cache_key\" varchar(255)",
"NULL, \"expires\" timestamp with time zone NOT NULL ); DROP INDEX IF EXISTS",
"EXISTS \"worker_cache_table\"; CREATE TABLE \"worker_cache_table\" ( \"cache_key\" varchar(255) NOT NULL PRIMARY KEY, \"value\"",
"dependencies = [(\"reporting_common\", \"0021_delete_reportcolumnmap\")] operations = [ migrations.RunSQL( \"\"\" DROP TABLE IF EXISTS",
"( \"cache_key\" varchar(255) NOT NULL PRIMARY KEY, \"value\" text NOT NULL, \"expires\" timestamp",
"timestamp with time zone NOT NULL ); DROP INDEX IF EXISTS \"worker_cache_table_expires\"; CREATE",
"= [ migrations.RunSQL( \"\"\" DROP TABLE IF EXISTS \"worker_cache_table\"; CREATE TABLE \"worker_cache_table\" (",
"\"expires\" timestamp with time zone NOT NULL ); DROP INDEX IF EXISTS \"worker_cache_table_expires\";",
"[ migrations.RunSQL( \"\"\" DROP TABLE IF EXISTS \"worker_cache_table\"; CREATE TABLE \"worker_cache_table\" ( \"cache_key\"",
"NULL ); DROP INDEX IF EXISTS \"worker_cache_table_expires\"; CREATE INDEX \"worker_cache_table_expires\" ON \"worker_cache_table\" (\"expires\");",
"by Django 2.2.12 on 2020-05-05 17:07 from django.db import migrations class Migration(migrations.Migration): dependencies"
] |
[
"True except KeyboardInterrupt: cliente_chat.close() print('El chat se está cerrando...') repetir = False except",
"msg = input('>>: ') if msg == 'salir': mensaje = str.encode(msg) # We",
"We must write bytes, not a string cliente_chat.send(mensaje) sys.exit(1) else: mensaje = str.encode(msg)",
"chat se está cerrando...') repetir = False except ConnectionAbortedError: cliente_chat.close() print('El chat se",
"al servidor_chat a la espera de una respuesta. #necesitmaos convertir el tipo string",
"string cliente_chat.send(mensaje) sys.exit(1) else: mensaje = str.encode(msg) # We must write bytes, not",
"sys.exit(1) #repetir = False else: print(mensaje_server) repetir = True except KeyboardInterrupt: cliente_chat.close() print('El",
"del chat. Pulse el enter para salir.') sys.exit(1) #repetir = False else: print(mensaje_server)",
"not a string cliente_chat.send(mensaje) sys.exit(1) else: mensaje = str.encode(msg) # We must write",
"respuesta. #necesitmaos convertir el tipo string en bytes para que pueda ser enviada",
"cliente_chat = socket.socket() cliente_chat.connect( ('192.168.1.45',8080) ) #Se conecta con el servidor_chat repetir =",
"must write bytes, not a string cliente_chat.send(mensaje) #Aquí envías un primer mensaje al",
"chat. Pulse el enter para salir.') sys.exit(1) #repetir = False else: print(mensaje_server) repetir",
"if msg == 'salir': mensaje = str.encode(msg) # We must write bytes, not",
"a string cliente_chat.send(mensaje) sys.exit(1) else: mensaje = str.encode(msg) # We must write bytes,",
"repetir = True except KeyboardInterrupt: cliente_chat.close() print('El chat se está cerrando...') repetir =",
"We must write bytes, not a string cliente_chat.send(mensaje) #Aquí envías un primer mensaje",
"envías un primer mensaje al servidor_chat a la espera de una respuesta. #necesitmaos",
"string cliente_chat.send(mensaje) #Aquí envías un primer mensaje al servidor_chat a la espera de",
"cliente_chat.close() print('El chat se está cerrando...') repetir = False except ConnectionAbortedError: cliente_chat.close() print('El",
"tipo string en bytes para que pueda ser enviada al servidor. ################################################## mensaje_server",
"escrito en el servidor. exit = str.encode(\"salir\") if mensaje_server is exit: #cliente_chat.close() print('Usted",
"enter para salir.') sys.exit(1) #repetir = False else: print(mensaje_server) repetir = True except",
"== 'salir': mensaje = str.encode(msg) # We must write bytes, not a string",
"#Aquí envías un primer mensaje al servidor_chat a la espera de una respuesta.",
"except KeyboardInterrupt: cliente_chat.close() print('El chat se está cerrando...') repetir = False except ConnectionAbortedError:",
"if mensaje_server is exit: #cliente_chat.close() print('Usted acaba de salir del chat. Pulse el",
"repetir = True while repetir: try: ################################################## msg = input('>>: ') if msg",
"conecta con el servidor_chat repetir = True while repetir: try: ################################################## msg =",
"<gh_stars>0 import socket import sys cliente_chat = socket.socket() cliente_chat.connect( ('192.168.1.45',8080) ) #Se conecta",
"print('El chat se está cerrando...') repetir = False except ConnectionAbortedError: cliente_chat.close() print('El chat",
"= cliente_chat.recv(1024)#Aquí puedes obtener el mensaje que has escrito en el servidor. exit",
"en el servidor. exit = str.encode(\"salir\") if mensaje_server is exit: #cliente_chat.close() print('Usted acaba",
"string en bytes para que pueda ser enviada al servidor. ################################################## mensaje_server =",
"print('Usted acaba de salir del chat. Pulse el enter para salir.') sys.exit(1) #repetir",
"que pueda ser enviada al servidor. ################################################## mensaje_server = cliente_chat.recv(1024)#Aquí puedes obtener el",
"que has escrito en el servidor. exit = str.encode(\"salir\") if mensaje_server is exit:",
"str.encode(\"salir\") if mensaje_server is exit: #cliente_chat.close() print('Usted acaba de salir del chat. Pulse",
"import sys cliente_chat = socket.socket() cliente_chat.connect( ('192.168.1.45',8080) ) #Se conecta con el servidor_chat",
"socket import sys cliente_chat = socket.socket() cliente_chat.connect( ('192.168.1.45',8080) ) #Se conecta con el",
"servidor_chat a la espera de una respuesta. #necesitmaos convertir el tipo string en",
"servidor. ################################################## mensaje_server = cliente_chat.recv(1024)#Aquí puedes obtener el mensaje que has escrito en",
"de salir del chat. Pulse el enter para salir.') sys.exit(1) #repetir = False",
"el mensaje que has escrito en el servidor. exit = str.encode(\"salir\") if mensaje_server",
"cliente_chat.connect( ('192.168.1.45',8080) ) #Se conecta con el servidor_chat repetir = True while repetir:",
"mensaje al servidor_chat a la espera de una respuesta. #necesitmaos convertir el tipo",
"has escrito en el servidor. exit = str.encode(\"salir\") if mensaje_server is exit: #cliente_chat.close()",
"pueda ser enviada al servidor. ################################################## mensaje_server = cliente_chat.recv(1024)#Aquí puedes obtener el mensaje",
"KeyboardInterrupt: cliente_chat.close() print('El chat se está cerrando...') repetir = False except ConnectionAbortedError: cliente_chat.close()",
"#necesitmaos convertir el tipo string en bytes para que pueda ser enviada al",
"socket.socket() cliente_chat.connect( ('192.168.1.45',8080) ) #Se conecta con el servidor_chat repetir = True while",
"con el servidor_chat repetir = True while repetir: try: ################################################## msg = input('>>:",
"para que pueda ser enviada al servidor. ################################################## mensaje_server = cliente_chat.recv(1024)#Aquí puedes obtener",
"bytes, not a string cliente_chat.send(mensaje) #Aquí envías un primer mensaje al servidor_chat a",
"'salir': mensaje = str.encode(msg) # We must write bytes, not a string cliente_chat.send(mensaje)",
"else: print(mensaje_server) repetir = True except KeyboardInterrupt: cliente_chat.close() print('El chat se está cerrando...')",
"= False else: print(mensaje_server) repetir = True except KeyboardInterrupt: cliente_chat.close() print('El chat se",
"la espera de una respuesta. #necesitmaos convertir el tipo string en bytes para",
"= False except ConnectionAbortedError: cliente_chat.close() print('El chat se está cerrando...') repetir = False",
"('192.168.1.45',8080) ) #Se conecta con el servidor_chat repetir = True while repetir: try:",
"para salir.') sys.exit(1) #repetir = False else: print(mensaje_server) repetir = True except KeyboardInterrupt:",
"= str.encode(msg) # We must write bytes, not a string cliente_chat.send(mensaje) #Aquí envías",
"# We must write bytes, not a string cliente_chat.send(mensaje) sys.exit(1) else: mensaje =",
"espera de una respuesta. #necesitmaos convertir el tipo string en bytes para que",
"un primer mensaje al servidor_chat a la espera de una respuesta. #necesitmaos convertir",
"#cliente_chat.close() print('Usted acaba de salir del chat. Pulse el enter para salir.') sys.exit(1)",
"mensaje_server is exit: #cliente_chat.close() print('Usted acaba de salir del chat. Pulse el enter",
"################################################## mensaje_server = cliente_chat.recv(1024)#Aquí puedes obtener el mensaje que has escrito en el",
") #Se conecta con el servidor_chat repetir = True while repetir: try: ##################################################",
"una respuesta. #necesitmaos convertir el tipo string en bytes para que pueda ser",
"el tipo string en bytes para que pueda ser enviada al servidor. ##################################################",
"a string cliente_chat.send(mensaje) #Aquí envías un primer mensaje al servidor_chat a la espera",
"not a string cliente_chat.send(mensaje) #Aquí envías un primer mensaje al servidor_chat a la",
"en bytes para que pueda ser enviada al servidor. ################################################## mensaje_server = cliente_chat.recv(1024)#Aquí",
"mensaje = str.encode(msg) # We must write bytes, not a string cliente_chat.send(mensaje) sys.exit(1)",
"else: mensaje = str.encode(msg) # We must write bytes, not a string cliente_chat.send(mensaje)",
"import socket import sys cliente_chat = socket.socket() cliente_chat.connect( ('192.168.1.45',8080) ) #Se conecta con",
"mensaje_server = cliente_chat.recv(1024)#Aquí puedes obtener el mensaje que has escrito en el servidor.",
"while repetir: try: ################################################## msg = input('>>: ') if msg == 'salir': mensaje",
"Pulse el enter para salir.') sys.exit(1) #repetir = False else: print(mensaje_server) repetir =",
"convertir el tipo string en bytes para que pueda ser enviada al servidor.",
"') if msg == 'salir': mensaje = str.encode(msg) # We must write bytes,",
"try: ################################################## msg = input('>>: ') if msg == 'salir': mensaje = str.encode(msg)",
"cliente_chat.send(mensaje) sys.exit(1) else: mensaje = str.encode(msg) # We must write bytes, not a",
"servidor_chat repetir = True while repetir: try: ################################################## msg = input('>>: ') if",
"obtener el mensaje que has escrito en el servidor. exit = str.encode(\"salir\") if",
"acaba de salir del chat. Pulse el enter para salir.') sys.exit(1) #repetir =",
"= True except KeyboardInterrupt: cliente_chat.close() print('El chat se está cerrando...') repetir = False",
"cerrando...') repetir = False except ConnectionAbortedError: cliente_chat.close() print('El chat se está cerrando...') repetir",
"write bytes, not a string cliente_chat.send(mensaje) sys.exit(1) else: mensaje = str.encode(msg) # We",
"el enter para salir.') sys.exit(1) #repetir = False else: print(mensaje_server) repetir = True",
"cliente_chat.recv(1024)#Aquí puedes obtener el mensaje que has escrito en el servidor. exit =",
"################################################## msg = input('>>: ') if msg == 'salir': mensaje = str.encode(msg) #",
"str.encode(msg) # We must write bytes, not a string cliente_chat.send(mensaje) sys.exit(1) else: mensaje",
"de una respuesta. #necesitmaos convertir el tipo string en bytes para que pueda",
"= input('>>: ') if msg == 'salir': mensaje = str.encode(msg) # We must",
"bytes para que pueda ser enviada al servidor. ################################################## mensaje_server = cliente_chat.recv(1024)#Aquí puedes",
"está cerrando...') repetir = False except ConnectionAbortedError: cliente_chat.close() print('El chat se está cerrando...')",
"= True while repetir: try: ################################################## msg = input('>>: ') if msg ==",
"exit: #cliente_chat.close() print('Usted acaba de salir del chat. Pulse el enter para salir.')",
"servidor. exit = str.encode(\"salir\") if mensaje_server is exit: #cliente_chat.close() print('Usted acaba de salir",
"ser enviada al servidor. ################################################## mensaje_server = cliente_chat.recv(1024)#Aquí puedes obtener el mensaje que",
"sys.exit(1) else: mensaje = str.encode(msg) # We must write bytes, not a string",
"print(mensaje_server) repetir = True except KeyboardInterrupt: cliente_chat.close() print('El chat se está cerrando...') repetir",
"input('>>: ') if msg == 'salir': mensaje = str.encode(msg) # We must write",
"#Se conecta con el servidor_chat repetir = True while repetir: try: ################################################## msg",
"#repetir = False else: print(mensaje_server) repetir = True except KeyboardInterrupt: cliente_chat.close() print('El chat",
"a la espera de una respuesta. #necesitmaos convertir el tipo string en bytes",
"al servidor. ################################################## mensaje_server = cliente_chat.recv(1024)#Aquí puedes obtener el mensaje que has escrito",
"= socket.socket() cliente_chat.connect( ('192.168.1.45',8080) ) #Se conecta con el servidor_chat repetir = True",
"el servidor_chat repetir = True while repetir: try: ################################################## msg = input('>>: ')",
"mensaje = str.encode(msg) # We must write bytes, not a string cliente_chat.send(mensaje) #Aquí",
"= str.encode(\"salir\") if mensaje_server is exit: #cliente_chat.close() print('Usted acaba de salir del chat.",
"exit = str.encode(\"salir\") if mensaje_server is exit: #cliente_chat.close() print('Usted acaba de salir del",
"is exit: #cliente_chat.close() print('Usted acaba de salir del chat. Pulse el enter para",
"mensaje que has escrito en el servidor. exit = str.encode(\"salir\") if mensaje_server is",
"# We must write bytes, not a string cliente_chat.send(mensaje) #Aquí envías un primer",
"write bytes, not a string cliente_chat.send(mensaje) #Aquí envías un primer mensaje al servidor_chat",
"puedes obtener el mensaje que has escrito en el servidor. exit = str.encode(\"salir\")",
"must write bytes, not a string cliente_chat.send(mensaje) sys.exit(1) else: mensaje = str.encode(msg) #",
"cliente_chat.send(mensaje) #Aquí envías un primer mensaje al servidor_chat a la espera de una",
"msg == 'salir': mensaje = str.encode(msg) # We must write bytes, not a",
"True while repetir: try: ################################################## msg = input('>>: ') if msg == 'salir':",
"salir.') sys.exit(1) #repetir = False else: print(mensaje_server) repetir = True except KeyboardInterrupt: cliente_chat.close()",
"enviada al servidor. ################################################## mensaje_server = cliente_chat.recv(1024)#Aquí puedes obtener el mensaje que has",
"sys cliente_chat = socket.socket() cliente_chat.connect( ('192.168.1.45',8080) ) #Se conecta con el servidor_chat repetir",
"str.encode(msg) # We must write bytes, not a string cliente_chat.send(mensaje) #Aquí envías un",
"repetir = False except ConnectionAbortedError: cliente_chat.close() print('El chat se está cerrando...') repetir =",
"el servidor. exit = str.encode(\"salir\") if mensaje_server is exit: #cliente_chat.close() print('Usted acaba de",
"salir del chat. Pulse el enter para salir.') sys.exit(1) #repetir = False else:",
"primer mensaje al servidor_chat a la espera de una respuesta. #necesitmaos convertir el",
"= str.encode(msg) # We must write bytes, not a string cliente_chat.send(mensaje) sys.exit(1) else:",
"bytes, not a string cliente_chat.send(mensaje) sys.exit(1) else: mensaje = str.encode(msg) # We must",
"False else: print(mensaje_server) repetir = True except KeyboardInterrupt: cliente_chat.close() print('El chat se está",
"repetir: try: ################################################## msg = input('>>: ') if msg == 'salir': mensaje =",
"se está cerrando...') repetir = False except ConnectionAbortedError: cliente_chat.close() print('El chat se está"
] |
[
"pragma: no cover \"\"\"Compare original and modified MuJoCo v3 envs.\"\"\" train_timesteps = 200000",
"cover \"\"\"Compare original and modified MuJoCo v3 envs.\"\"\" train_timesteps = 200000 gym_reward, _",
"some cases. # See discussion in GH#6 and GH#40. @pytest.mark.expensive @pytest.mark.parametrize( \"env_base\", [\"HalfCheetah\",",
"GH#6 and GH#40. @pytest.mark.expensive @pytest.mark.parametrize( \"env_base\", [\"HalfCheetah\", \"Ant\", \"Hopper\", \"Humanoid\", \"Swimmer\", \"Walker2d\"], )",
"for `total_timesteps` on `env_name` and evaluate returns.\"\"\" env = gym.make(env_name) model = stable_baselines3.PPO(\"MlpPolicy\",",
"\"\"\"Compare original and modified MuJoCo v3 envs.\"\"\" train_timesteps = 200000 gym_reward, _ =",
"import Tuple import gym import pytest import stable_baselines3 from stable_baselines3.common import evaluation import",
"from typing import Tuple import gym import pytest import stable_baselines3 from stable_baselines3.common import",
"returns.\"\"\" env = gym.make(env_name) model = stable_baselines3.PPO(\"MlpPolicy\", env) model.learn(total_timesteps=total_timesteps) res = evaluation.evaluate_policy(model, env)",
") epsilon = 0.1 sign = 1 if gym_reward > 0 else -1",
"\"Walker2d\"], ) def test_fixed_env_model_as_good_as_gym_env_model(env_base: str): # pragma: no cover \"\"\"Compare original and modified",
"pragma: no cover \"\"\"Train PPO2 for `total_timesteps` on `env_name` and evaluate returns.\"\"\" env",
"for env registration def _eval_env( env_name: str, total_timesteps: int, ) -> Tuple[float, float]:",
"# SOMEDAY(adam): tests are flaky and consistently fail in some environments # Unclear",
"# pragma: no cover \"\"\"Train PPO2 for `total_timesteps` on `env_name` and evaluate returns.\"\"\"",
"cover \"\"\"Train PPO2 for `total_timesteps` on `env_name` and evaluate returns.\"\"\" env = gym.make(env_name)",
"float) return res # SOMEDAY(adam): tests are flaky and consistently fail in some",
") def test_fixed_env_model_as_good_as_gym_env_model(env_base: str): # pragma: no cover \"\"\"Compare original and modified MuJoCo",
"float]: # pragma: no cover \"\"\"Train PPO2 for `total_timesteps` on `env_name` and evaluate",
"and evaluate returns.\"\"\" env = gym.make(env_name) model = stable_baselines3.PPO(\"MlpPolicy\", env) model.learn(total_timesteps=total_timesteps) res =",
"and consistently fail in some environments # Unclear if they even should pass",
"no cover \"\"\"Compare original and modified MuJoCo v3 envs.\"\"\" train_timesteps = 200000 gym_reward,",
"= gym.make(env_name) model = stable_baselines3.PPO(\"MlpPolicy\", env) model.learn(total_timesteps=total_timesteps) res = evaluation.evaluate_policy(model, env) assert isinstance(res[0],",
"test_fixed_env_model_as_good_as_gym_env_model(env_base: str): # pragma: no cover \"\"\"Compare original and modified MuJoCo v3 envs.\"\"\"",
"int, ) -> Tuple[float, float]: # pragma: no cover \"\"\"Train PPO2 for `total_timesteps`",
"registration def _eval_env( env_name: str, total_timesteps: int, ) -> Tuple[float, float]: # pragma:",
"\"\"\"Train PPO2 for `total_timesteps` on `env_name` and evaluate returns.\"\"\" env = gym.make(env_name) model",
"str, total_timesteps: int, ) -> Tuple[float, float]: # pragma: no cover \"\"\"Train PPO2",
"in some environments # Unclear if they even should pass in some cases.",
"should pass in some cases. # See discussion in GH#6 and GH#40. @pytest.mark.expensive",
"some environments # Unclear if they even should pass in some cases. #",
"and GH#40. @pytest.mark.expensive @pytest.mark.parametrize( \"env_base\", [\"HalfCheetah\", \"Ant\", \"Hopper\", \"Humanoid\", \"Swimmer\", \"Walker2d\"], ) def",
"f\"seals/{env_base}-v0\", total_timesteps=train_timesteps, ) epsilon = 0.1 sign = 1 if gym_reward > 0",
"adapted environments.\"\"\" from typing import Tuple import gym import pytest import stable_baselines3 from",
"0 else -1 assert (1 - sign * epsilon) * gym_reward <= fixed_reward",
"v3 envs.\"\"\" train_timesteps = 200000 gym_reward, _ = _eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps) fixed_reward, _ =",
"= stable_baselines3.PPO(\"MlpPolicy\", env) model.learn(total_timesteps=total_timesteps) res = evaluation.evaluate_policy(model, env) assert isinstance(res[0], float) return res",
"train_timesteps = 200000 gym_reward, _ = _eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps) fixed_reward, _ = _eval_env( f\"seals/{env_base}-v0\",",
"\"Hopper\", \"Humanoid\", \"Swimmer\", \"Walker2d\"], ) def test_fixed_env_model_as_good_as_gym_env_model(env_base: str): # pragma: no cover \"\"\"Compare",
"PPO2 for `total_timesteps` on `env_name` and evaluate returns.\"\"\" env = gym.make(env_name) model =",
"@pytest.mark.expensive @pytest.mark.parametrize( \"env_base\", [\"HalfCheetah\", \"Ant\", \"Hopper\", \"Humanoid\", \"Swimmer\", \"Walker2d\"], ) def test_fixed_env_model_as_good_as_gym_env_model(env_base: str):",
"pytest import stable_baselines3 from stable_baselines3.common import evaluation import seals # noqa: F401 Import",
"import evaluation import seals # noqa: F401 Import required for env registration def",
"they even should pass in some cases. # See discussion in GH#6 and",
"on `env_name` and evaluate returns.\"\"\" env = gym.make(env_name) model = stable_baselines3.PPO(\"MlpPolicy\", env) model.learn(total_timesteps=total_timesteps)",
"env) assert isinstance(res[0], float) return res # SOMEDAY(adam): tests are flaky and consistently",
"Tuple[float, float]: # pragma: no cover \"\"\"Train PPO2 for `total_timesteps` on `env_name` and",
"environments.\"\"\" from typing import Tuple import gym import pytest import stable_baselines3 from stable_baselines3.common",
"gym.make(env_name) model = stable_baselines3.PPO(\"MlpPolicy\", env) model.learn(total_timesteps=total_timesteps) res = evaluation.evaluate_policy(model, env) assert isinstance(res[0], float)",
"_eval_env( f\"seals/{env_base}-v0\", total_timesteps=train_timesteps, ) epsilon = 0.1 sign = 1 if gym_reward >",
"seals # noqa: F401 Import required for env registration def _eval_env( env_name: str,",
"<filename>tests/test_mujoco_rl.py \"\"\"Test RL on MuJoCo adapted environments.\"\"\" from typing import Tuple import gym",
"RL on MuJoCo adapted environments.\"\"\" from typing import Tuple import gym import pytest",
"def _eval_env( env_name: str, total_timesteps: int, ) -> Tuple[float, float]: # pragma: no",
"gym_reward, _ = _eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps) fixed_reward, _ = _eval_env( f\"seals/{env_base}-v0\", total_timesteps=train_timesteps, ) epsilon",
"GH#40. @pytest.mark.expensive @pytest.mark.parametrize( \"env_base\", [\"HalfCheetah\", \"Ant\", \"Hopper\", \"Humanoid\", \"Swimmer\", \"Walker2d\"], ) def test_fixed_env_model_as_good_as_gym_env_model(env_base:",
"stable_baselines3 from stable_baselines3.common import evaluation import seals # noqa: F401 Import required for",
"on MuJoCo adapted environments.\"\"\" from typing import Tuple import gym import pytest import",
"import seals # noqa: F401 Import required for env registration def _eval_env( env_name:",
"noqa: F401 Import required for env registration def _eval_env( env_name: str, total_timesteps: int,",
"from stable_baselines3.common import evaluation import seals # noqa: F401 Import required for env",
"[\"HalfCheetah\", \"Ant\", \"Hopper\", \"Humanoid\", \"Swimmer\", \"Walker2d\"], ) def test_fixed_env_model_as_good_as_gym_env_model(env_base: str): # pragma: no",
"stable_baselines3.PPO(\"MlpPolicy\", env) model.learn(total_timesteps=total_timesteps) res = evaluation.evaluate_policy(model, env) assert isinstance(res[0], float) return res #",
"stable_baselines3.common import evaluation import seals # noqa: F401 Import required for env registration",
"evaluation import seals # noqa: F401 Import required for env registration def _eval_env(",
"evaluation.evaluate_policy(model, env) assert isinstance(res[0], float) return res # SOMEDAY(adam): tests are flaky and",
"tests are flaky and consistently fail in some environments # Unclear if they",
"See discussion in GH#6 and GH#40. @pytest.mark.expensive @pytest.mark.parametrize( \"env_base\", [\"HalfCheetah\", \"Ant\", \"Hopper\", \"Humanoid\",",
"if they even should pass in some cases. # See discussion in GH#6",
"Import required for env registration def _eval_env( env_name: str, total_timesteps: int, ) ->",
"`total_timesteps` on `env_name` and evaluate returns.\"\"\" env = gym.make(env_name) model = stable_baselines3.PPO(\"MlpPolicy\", env)",
"# noqa: F401 Import required for env registration def _eval_env( env_name: str, total_timesteps:",
"assert isinstance(res[0], float) return res # SOMEDAY(adam): tests are flaky and consistently fail",
"-> Tuple[float, float]: # pragma: no cover \"\"\"Train PPO2 for `total_timesteps` on `env_name`",
"isinstance(res[0], float) return res # SOMEDAY(adam): tests are flaky and consistently fail in",
"MuJoCo v3 envs.\"\"\" train_timesteps = 200000 gym_reward, _ = _eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps) fixed_reward, _",
"cases. # See discussion in GH#6 and GH#40. @pytest.mark.expensive @pytest.mark.parametrize( \"env_base\", [\"HalfCheetah\", \"Ant\",",
"# Unclear if they even should pass in some cases. # See discussion",
"epsilon = 0.1 sign = 1 if gym_reward > 0 else -1 assert",
"= _eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps) fixed_reward, _ = _eval_env( f\"seals/{env_base}-v0\", total_timesteps=train_timesteps, ) epsilon = 0.1",
"\"Ant\", \"Hopper\", \"Humanoid\", \"Swimmer\", \"Walker2d\"], ) def test_fixed_env_model_as_good_as_gym_env_model(env_base: str): # pragma: no cover",
"modified MuJoCo v3 envs.\"\"\" train_timesteps = 200000 gym_reward, _ = _eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps) fixed_reward,",
"fail in some environments # Unclear if they even should pass in some",
"model = stable_baselines3.PPO(\"MlpPolicy\", env) model.learn(total_timesteps=total_timesteps) res = evaluation.evaluate_policy(model, env) assert isinstance(res[0], float) return",
"= evaluation.evaluate_policy(model, env) assert isinstance(res[0], float) return res # SOMEDAY(adam): tests are flaky",
"env_name: str, total_timesteps: int, ) -> Tuple[float, float]: # pragma: no cover \"\"\"Train",
"in GH#6 and GH#40. @pytest.mark.expensive @pytest.mark.parametrize( \"env_base\", [\"HalfCheetah\", \"Ant\", \"Hopper\", \"Humanoid\", \"Swimmer\", \"Walker2d\"],",
"F401 Import required for env registration def _eval_env( env_name: str, total_timesteps: int, )",
"res # SOMEDAY(adam): tests are flaky and consistently fail in some environments #",
"res = evaluation.evaluate_policy(model, env) assert isinstance(res[0], float) return res # SOMEDAY(adam): tests are",
"discussion in GH#6 and GH#40. @pytest.mark.expensive @pytest.mark.parametrize( \"env_base\", [\"HalfCheetah\", \"Ant\", \"Hopper\", \"Humanoid\", \"Swimmer\",",
"= 0.1 sign = 1 if gym_reward > 0 else -1 assert (1",
"return res # SOMEDAY(adam): tests are flaky and consistently fail in some environments",
"are flaky and consistently fail in some environments # Unclear if they even",
"Tuple import gym import pytest import stable_baselines3 from stable_baselines3.common import evaluation import seals",
"gym_reward > 0 else -1 assert (1 - sign * epsilon) * gym_reward",
"Unclear if they even should pass in some cases. # See discussion in",
"_eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps) fixed_reward, _ = _eval_env( f\"seals/{env_base}-v0\", total_timesteps=train_timesteps, ) epsilon = 0.1 sign",
"consistently fail in some environments # Unclear if they even should pass in",
"even should pass in some cases. # See discussion in GH#6 and GH#40.",
"str): # pragma: no cover \"\"\"Compare original and modified MuJoCo v3 envs.\"\"\" train_timesteps",
"# pragma: no cover \"\"\"Compare original and modified MuJoCo v3 envs.\"\"\" train_timesteps =",
"env = gym.make(env_name) model = stable_baselines3.PPO(\"MlpPolicy\", env) model.learn(total_timesteps=total_timesteps) res = evaluation.evaluate_policy(model, env) assert",
"model.learn(total_timesteps=total_timesteps) res = evaluation.evaluate_policy(model, env) assert isinstance(res[0], float) return res # SOMEDAY(adam): tests",
"original and modified MuJoCo v3 envs.\"\"\" train_timesteps = 200000 gym_reward, _ = _eval_env(f\"{env_base}-v3\",",
"env) model.learn(total_timesteps=total_timesteps) res = evaluation.evaluate_policy(model, env) assert isinstance(res[0], float) return res # SOMEDAY(adam):",
"# See discussion in GH#6 and GH#40. @pytest.mark.expensive @pytest.mark.parametrize( \"env_base\", [\"HalfCheetah\", \"Ant\", \"Hopper\",",
"envs.\"\"\" train_timesteps = 200000 gym_reward, _ = _eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps) fixed_reward, _ = _eval_env(",
"_ = _eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps) fixed_reward, _ = _eval_env( f\"seals/{env_base}-v0\", total_timesteps=train_timesteps, ) epsilon =",
"`env_name` and evaluate returns.\"\"\" env = gym.make(env_name) model = stable_baselines3.PPO(\"MlpPolicy\", env) model.learn(total_timesteps=total_timesteps) res",
"environments # Unclear if they even should pass in some cases. # See",
"MuJoCo adapted environments.\"\"\" from typing import Tuple import gym import pytest import stable_baselines3",
"1 if gym_reward > 0 else -1 assert (1 - sign * epsilon)",
"_ = _eval_env( f\"seals/{env_base}-v0\", total_timesteps=train_timesteps, ) epsilon = 0.1 sign = 1 if",
"> 0 else -1 assert (1 - sign * epsilon) * gym_reward <=",
"0.1 sign = 1 if gym_reward > 0 else -1 assert (1 -",
"_eval_env( env_name: str, total_timesteps: int, ) -> Tuple[float, float]: # pragma: no cover",
"total_timesteps=train_timesteps) fixed_reward, _ = _eval_env( f\"seals/{env_base}-v0\", total_timesteps=train_timesteps, ) epsilon = 0.1 sign =",
"import stable_baselines3 from stable_baselines3.common import evaluation import seals # noqa: F401 Import required",
"\"\"\"Test RL on MuJoCo adapted environments.\"\"\" from typing import Tuple import gym import",
"pass in some cases. # See discussion in GH#6 and GH#40. @pytest.mark.expensive @pytest.mark.parametrize(",
"\"env_base\", [\"HalfCheetah\", \"Ant\", \"Hopper\", \"Humanoid\", \"Swimmer\", \"Walker2d\"], ) def test_fixed_env_model_as_good_as_gym_env_model(env_base: str): # pragma:",
"no cover \"\"\"Train PPO2 for `total_timesteps` on `env_name` and evaluate returns.\"\"\" env =",
") -> Tuple[float, float]: # pragma: no cover \"\"\"Train PPO2 for `total_timesteps` on",
"typing import Tuple import gym import pytest import stable_baselines3 from stable_baselines3.common import evaluation",
"env registration def _eval_env( env_name: str, total_timesteps: int, ) -> Tuple[float, float]: #",
"= 200000 gym_reward, _ = _eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps) fixed_reward, _ = _eval_env( f\"seals/{env_base}-v0\", total_timesteps=train_timesteps,",
"if gym_reward > 0 else -1 assert (1 - sign * epsilon) *",
"fixed_reward, _ = _eval_env( f\"seals/{env_base}-v0\", total_timesteps=train_timesteps, ) epsilon = 0.1 sign = 1",
"import pytest import stable_baselines3 from stable_baselines3.common import evaluation import seals # noqa: F401",
"= _eval_env( f\"seals/{env_base}-v0\", total_timesteps=train_timesteps, ) epsilon = 0.1 sign = 1 if gym_reward",
"and modified MuJoCo v3 envs.\"\"\" train_timesteps = 200000 gym_reward, _ = _eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps)",
"= 1 if gym_reward > 0 else -1 assert (1 - sign *",
"gym import pytest import stable_baselines3 from stable_baselines3.common import evaluation import seals # noqa:",
"evaluate returns.\"\"\" env = gym.make(env_name) model = stable_baselines3.PPO(\"MlpPolicy\", env) model.learn(total_timesteps=total_timesteps) res = evaluation.evaluate_policy(model,",
"total_timesteps: int, ) -> Tuple[float, float]: # pragma: no cover \"\"\"Train PPO2 for",
"required for env registration def _eval_env( env_name: str, total_timesteps: int, ) -> Tuple[float,",
"def test_fixed_env_model_as_good_as_gym_env_model(env_base: str): # pragma: no cover \"\"\"Compare original and modified MuJoCo v3",
"SOMEDAY(adam): tests are flaky and consistently fail in some environments # Unclear if",
"200000 gym_reward, _ = _eval_env(f\"{env_base}-v3\", total_timesteps=train_timesteps) fixed_reward, _ = _eval_env( f\"seals/{env_base}-v0\", total_timesteps=train_timesteps, )",
"@pytest.mark.parametrize( \"env_base\", [\"HalfCheetah\", \"Ant\", \"Hopper\", \"Humanoid\", \"Swimmer\", \"Walker2d\"], ) def test_fixed_env_model_as_good_as_gym_env_model(env_base: str): #",
"flaky and consistently fail in some environments # Unclear if they even should",
"sign = 1 if gym_reward > 0 else -1 assert (1 - sign",
"total_timesteps=train_timesteps, ) epsilon = 0.1 sign = 1 if gym_reward > 0 else",
"import gym import pytest import stable_baselines3 from stable_baselines3.common import evaluation import seals #",
"in some cases. # See discussion in GH#6 and GH#40. @pytest.mark.expensive @pytest.mark.parametrize( \"env_base\",",
"\"Swimmer\", \"Walker2d\"], ) def test_fixed_env_model_as_good_as_gym_env_model(env_base: str): # pragma: no cover \"\"\"Compare original and",
"\"Humanoid\", \"Swimmer\", \"Walker2d\"], ) def test_fixed_env_model_as_good_as_gym_env_model(env_base: str): # pragma: no cover \"\"\"Compare original"
] |
[
"% 2 == 0: print('Essa expressão está correta!') else: print('essa expressão esta Equivocada!')",
"print(quant_Fechar) print(quant_Abrir) if quant_Fechar % 2 == 0 and quant_Abrir % 2 ==",
"2 == 0 and quant_Abrir % 2 == 0: print('Essa expressão está correta!')",
"% 2 == 0 and quant_Abrir % 2 == 0: print('Essa expressão está",
"expressão: ')) quant_Abrir = valor.count('(') quant_Fechar = valor.count(')') print(quant_Fechar) print(quant_Abrir) if quant_Fechar %",
"= str(input('Digite uma expressão: ')) quant_Abrir = valor.count('(') quant_Fechar = valor.count(')') print(quant_Fechar) print(quant_Abrir)",
"quant_Fechar = valor.count(')') print(quant_Fechar) print(quant_Abrir) if quant_Fechar % 2 == 0 and quant_Abrir",
"print(quant_Abrir) if quant_Fechar % 2 == 0 and quant_Abrir % 2 == 0:",
"and quant_Abrir % 2 == 0: print('Essa expressão está correta!') else: print('essa expressão",
"= valor.count('(') quant_Fechar = valor.count(')') print(quant_Fechar) print(quant_Abrir) if quant_Fechar % 2 == 0",
"valor.count(')') print(quant_Fechar) print(quant_Abrir) if quant_Fechar % 2 == 0 and quant_Abrir % 2",
"quant_Abrir = valor.count('(') quant_Fechar = valor.count(')') print(quant_Fechar) print(quant_Abrir) if quant_Fechar % 2 ==",
"0 and quant_Abrir % 2 == 0: print('Essa expressão está correta!') else: print('essa",
"valor = str(input('Digite uma expressão: ')) quant_Abrir = valor.count('(') quant_Fechar = valor.count(')') print(quant_Fechar)",
"<reponame>thiagofreitascarneiro/Curso-de-Python---Curso-em-Video<gh_stars>1-10 valor = str(input('Digite uma expressão: ')) quant_Abrir = valor.count('(') quant_Fechar = valor.count(')')",
"uma expressão: ')) quant_Abrir = valor.count('(') quant_Fechar = valor.count(')') print(quant_Fechar) print(quant_Abrir) if quant_Fechar",
"quant_Fechar % 2 == 0 and quant_Abrir % 2 == 0: print('Essa expressão",
"== 0 and quant_Abrir % 2 == 0: print('Essa expressão está correta!') else:",
"quant_Abrir % 2 == 0: print('Essa expressão está correta!') else: print('essa expressão esta",
"str(input('Digite uma expressão: ')) quant_Abrir = valor.count('(') quant_Fechar = valor.count(')') print(quant_Fechar) print(quant_Abrir) if",
"if quant_Fechar % 2 == 0 and quant_Abrir % 2 == 0: print('Essa",
"= valor.count(')') print(quant_Fechar) print(quant_Abrir) if quant_Fechar % 2 == 0 and quant_Abrir %",
"valor.count('(') quant_Fechar = valor.count(')') print(quant_Fechar) print(quant_Abrir) if quant_Fechar % 2 == 0 and",
"')) quant_Abrir = valor.count('(') quant_Fechar = valor.count(')') print(quant_Fechar) print(quant_Abrir) if quant_Fechar % 2"
] |
[
"in range(task_conv-1): self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus())) self.classifier = nn.Sequential(self.classifier, nn.Flatten(), nn.Linear(in_shape, out_features))",
"= inputs.view((inputs.size(0), -1)) logits = self.fc_net(inputs) return logits def trainable_parameters(self): \"\"\" Returns an",
"yield param class TaskFullyConnectedLayer(nn.Module): def __init__(self,num_layer=1, task_conv=0): super(TaskFullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential(",
"# When this is true is called the \"transductive setting\" ), activation )",
"''' self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for j in range(num_layer-1): self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) )",
"and not dfc: self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) elif dfc: self.classifier =",
"= nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features)) else: self.classifier = conv3x3(hidden_size, hidden_size) for j",
"out_features)) self.hidden_size = self.hidden_size ''' if num_layer>1: self.classifier = nn.Linear(64,1) self.classifier = nn.Sequential(nn.Linear(64,1))",
"for j in range(task_conv-1): self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus())) self.classifier = nn.Sequential(self.classifier, nn.Flatten(),",
"= in_channels self.out_features = out_features self.hidden_size = hidden_size assert task_conv >= 0, \"Wrong",
"self.classifier(inputs) return logits def trainable_parameters(self): \"\"\" Returns an iterator over the trainable parameters",
"forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.classifier(inputs) return logits def",
"self.features(inputs) return features def trainable_parameters(self): \"\"\" Returns an iterator over the trainable parameters",
"j in range(task_conv-1): self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus())) self.classifier = nn.Sequential(self.classifier, nn.Flatten(), nn.Linear(in_shape,",
"Returns an iterator over the trainable parameters of the model. \"\"\" for param",
"out_features)) else: self.classifier = conv3x3(hidden_size, hidden_size) for j in range(task_conv-1): self.classifier = nn.Sequential(self.classifier,",
"conv3x3(hidden_size, hidden_size) for j in range(task_conv-1): self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus())) self.classifier =",
"return nn.Sequential( nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.MaxPool2d(2), nn.BatchNorm2d(out_channels, momentum=1., affine=True, track_running_stats=False # When",
"nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for j in",
"out_features)) elif dfc: self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features)) else: self.classifier =",
"of the model. \"\"\" for param in self.parameters(): if param.requires_grad: yield param class",
"ConvolutionalNeuralNetwork(nn.Module): def __init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork, self).__init__() self.in_channels = in_channels self.out_features =",
"momentum=1., affine=True, track_running_stats=False # When this is true is called the \"transductive setting\"",
"range(3-task_conv): self.features = nn.Sequential(self.features, conv3x3(hidden_size, hidden_size)) def forward(self, inputs, params=None): features = self.features(inputs)",
"def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.fc_net(inputs) return logits",
"hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork, self).__init__() self.in_channels = in_channels self.out_features = out_features self.hidden_size = hidden_size assert",
"not dfc: self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) elif dfc: self.classifier = nn.Sequential(conv3x3(hidden_size,",
"nn.Linear(64,1) self.classifier = nn.Sequential(nn.Linear(64,1)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits",
"inputs, params=None): features = self.features(inputs) return features def trainable_parameters(self): \"\"\" Returns an iterator",
"nn.Linear(in_shape, out_features)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.classifier(inputs)",
"def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer, self).__init__() self.in_shape = in_shape self.out_features = out_features if",
"params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.fc_net(inputs) return logits def trainable_parameters(self): \"\"\"",
"the \"transductive setting\" ), activation ) class FullyConnectedLayer(nn.Module): def __init__(self, num_layer=2): super(FullyConnectedLayer, self).__init__()",
"import torch.nn as nn import logging logger = logging.getLogger(__name__) def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)): return",
"#features = inputs.view((inputs.size(0), -1)) logits = self.fc_net(inputs) return logits def trainable_parameters(self): \"\"\" Returns",
"task_conv ==0 and not dfc: self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) elif dfc:",
"param in self.parameters(): if param.requires_grad: yield param class ConvolutionalNeuralNetwork(nn.Module): def __init__(self, in_channels, out_features,",
"nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for j",
"in range(num_layer-1): self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) ) def forward(self, inputs, params=None): #features =",
"hidden_size,activation=nn.Softplus())) self.classifier = nn.Sequential(self.classifier, nn.Flatten(), nn.Linear(in_shape, out_features)) def forward(self, inputs, params=None): #features =",
"num_layer>1: self.classifier = nn.Linear(64,1) self.classifier = nn.Sequential(nn.Linear(64,1)) def forward(self, inputs, params=None): #features =",
"called the \"transductive setting\" ), activation ) class FullyConnectedLayer(nn.Module): def __init__(self, num_layer=2): super(FullyConnectedLayer,",
"nn.Sequential(nn.Linear(64,1)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.classifier(inputs) return",
"logits def trainable_parameters(self): \"\"\" Returns an iterator over the trainable parameters of the",
"= hidden_size assert task_conv >= 0, \"Wrong call for task nets!\" self.features =",
"\"\"\" Returns an iterator over the trainable parameters of the model. \"\"\" for",
"if param.requires_grad: yield param class TaskLinearLayer(nn.Module): def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer, self).__init__() self.in_shape",
"param.requires_grad: yield param class ConvolutionalNeuralNetwork(nn.Module): def __init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork, self).__init__() self.in_channels",
"j in range(num_layer-1): self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) ) def forward(self, inputs, params=None): #features",
"==0 and not dfc: self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) elif dfc: self.classifier",
"if num_layer>1: self.classifier = nn.Linear(64,1) self.classifier = nn.Sequential(nn.Linear(64,1)) def forward(self, inputs, params=None): #features",
"for j in range(num_layer-1): self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) ) def forward(self, inputs, params=None):",
"def __init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork, self).__init__() self.in_channels = in_channels self.out_features = out_features",
"out_features if task_conv ==0 and not dfc: self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features))",
"an iterator over the trainable parameters of the model. \"\"\" for param in",
"padding=1), nn.MaxPool2d(2), nn.BatchNorm2d(out_channels, momentum=1., affine=True, track_running_stats=False # When this is true is called",
"num_layer=2): super(FullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size",
"= nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' if num_layer>1: self.classifier =",
"self.in_shape = in_shape self.out_features = out_features if task_conv ==0 and not dfc: self.classifier",
"nn.Flatten(), nn.Linear(128, out_features)) else: self.classifier = conv3x3(hidden_size, hidden_size) for j in range(task_conv-1): self.classifier",
"nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' if num_layer>1: self.classifier = nn.Linear(64,1) self.classifier =",
"logging logger = logging.getLogger(__name__) def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)): return nn.Sequential( nn.Conv2d(in_channels, out_channels, 3, padding=1),",
"= self.fc_net(inputs) return logits def trainable_parameters(self): \"\"\" Returns an iterator over the trainable",
"self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' self.fc_net",
"class TaskFullyConnectedLayer(nn.Module): def __init__(self,num_layer=1, task_conv=0): super(TaskFullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape,",
"model. \"\"\" for param in self.parameters(): if param.requires_grad: yield param class TaskFullyConnectedLayer(nn.Module): def",
"task_conv >= 0, \"Wrong call for task nets!\" self.features = conv3x3(in_channels, hidden_size) for",
"= self.classifier(inputs) return logits def trainable_parameters(self): \"\"\" Returns an iterator over the trainable",
"self.hidden_size ''' if num_layer>1: self.classifier = nn.Linear(64,1) self.classifier = nn.Sequential(nn.Linear(64,1)) def forward(self, inputs,",
"= out_features self.hidden_size = hidden_size assert task_conv >= 0, \"Wrong call for task",
"= self.hidden_size ''' if num_layer>1: self.classifier = nn.Linear(64,1) self.classifier = nn.Sequential(nn.Linear(64,1)) def forward(self,",
"inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.fc_net(inputs) return logits def trainable_parameters(self):",
"self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for j in range(num_layer-1): self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) ) def",
"nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.MaxPool2d(2), nn.BatchNorm2d(out_channels, momentum=1., affine=True, track_running_stats=False # When this is",
"''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' self.fc_net =",
"inputs.view((inputs.size(0), -1)) logits = self.classifier(inputs) return logits def trainable_parameters(self): \"\"\" Returns an iterator",
"conv3x3(hidden_size, hidden_size,activation=nn.Softplus())) self.classifier = nn.Sequential(self.classifier, nn.Flatten(), nn.Linear(in_shape, out_features)) def forward(self, inputs, params=None): #features",
"self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus())) self.classifier = nn.Sequential(self.classifier, nn.Flatten(), nn.Linear(in_shape, out_features)) def forward(self,",
"= out_features if task_conv ==0 and not dfc: self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape,",
"-1)) logits = self.classifier(inputs) return logits def trainable_parameters(self): \"\"\" Returns an iterator over",
"__init__(self, num_layer=2): super(FullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size =",
"model. \"\"\" for param in self.parameters(): if param.requires_grad: yield param class ConvolutionalNeuralNetwork(nn.Module): def",
"param in self.parameters(): if param.requires_grad: yield param class TaskLinearLayer(nn.Module): def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True):",
"''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' if num_layer>1:",
"forward(self, inputs, params=None): features = self.features(inputs) return features def trainable_parameters(self): \"\"\" Returns an",
"this is true is called the \"transductive setting\" ), activation ) class FullyConnectedLayer(nn.Module):",
"self.fc_net(inputs) return logits def trainable_parameters(self): \"\"\" Returns an iterator over the trainable parameters",
"the model. \"\"\" for param in self.parameters(): if param.requires_grad: yield param class ConvolutionalNeuralNetwork(nn.Module):",
"def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)): return nn.Sequential( nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.MaxPool2d(2), nn.BatchNorm2d(out_channels, momentum=1., affine=True,",
"hidden_size) for i in range(3-task_conv): self.features = nn.Sequential(self.features, conv3x3(hidden_size, hidden_size)) def forward(self, inputs,",
"conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)): return nn.Sequential( nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.MaxPool2d(2), nn.BatchNorm2d(out_channels, momentum=1., affine=True, track_running_stats=False",
"hidden_size) for j in range(task_conv-1): self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus())) self.classifier = nn.Sequential(self.classifier,",
"self.classifier = nn.Sequential(self.classifier, nn.Flatten(), nn.Linear(in_shape, out_features)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0),",
"iterator over the trainable parameters of the model. \"\"\" for param in self.parameters():",
"self.parameters(): if param.requires_grad: yield param class TaskFullyConnectedLayer(nn.Module): def __init__(self,num_layer=1, task_conv=0): super(TaskFullyConnectedLayer, self).__init__() '''",
"self.out_features = out_features if task_conv ==0 and not dfc: self.classifier = nn.Sequential( nn.Flatten(),",
"self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) elif dfc: self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(),",
"setting\" ), activation ) class FullyConnectedLayer(nn.Module): def __init__(self, num_layer=2): super(FullyConnectedLayer, self).__init__() ''' self.classifier",
"self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64))",
"def __init__(self,num_layer=1, task_conv=0): super(TaskFullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size",
"nn.Linear(in_shape, out_features)) elif dfc: self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features)) else: self.classifier",
"nn.Flatten(), nn.Linear(in_shape, out_features)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits =",
"yield param class ConvolutionalNeuralNetwork(nn.Module): def __init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork, self).__init__() self.in_channels =",
"self.hidden_size = self.hidden_size ''' if num_layer>1: self.classifier = nn.Linear(64,1) self.classifier = nn.Sequential(nn.Linear(64,1)) def",
"nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for j in range(num_layer-1): self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) ) def forward(self, inputs,",
"over the trainable parameters of the model. \"\"\" for param in self.parameters(): if",
"param class TaskFullyConnectedLayer(nn.Module): def __init__(self,num_layer=1, task_conv=0): super(TaskFullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(),",
"torch.nn as nn import logging logger = logging.getLogger(__name__) def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)): return nn.Sequential(",
"def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.classifier(inputs) return logits",
"= in_shape self.out_features = out_features if task_conv ==0 and not dfc: self.classifier =",
"as nn import logging logger = logging.getLogger(__name__) def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)): return nn.Sequential( nn.Conv2d(in_channels,",
"def __init__(self, num_layer=2): super(FullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size",
"= self.hidden_size ''' self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for j in range(num_layer-1): self.fc_net = nn.Sequential(self.fc_net,",
"#features = inputs.view((inputs.size(0), -1)) logits = self.classifier(inputs) return logits def trainable_parameters(self): \"\"\" Returns",
"task_conv=0): super(TaskFullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size",
"When this is true is called the \"transductive setting\" ), activation ) class",
"nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' if num_layer>1: self.classifier = nn.Linear(64,1) self.classifier",
"def trainable_parameters(self): \"\"\" Returns an iterator over the trainable parameters of the model.",
"range(task_conv-1): self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus())) self.classifier = nn.Sequential(self.classifier, nn.Flatten(), nn.Linear(in_shape, out_features)) def",
"__init__(self,num_layer=1, task_conv=0): super(TaskFullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size =",
"\"\"\" for param in self.parameters(): if param.requires_grad: yield param class ConvolutionalNeuralNetwork(nn.Module): def __init__(self,",
"out_features self.hidden_size = hidden_size assert task_conv >= 0, \"Wrong call for task nets!\"",
"self).__init__() self.in_shape = in_shape self.out_features = out_features if task_conv ==0 and not dfc:",
"\"Wrong call for task nets!\" self.features = conv3x3(in_channels, hidden_size) for i in range(3-task_conv):",
"yield param class TaskLinearLayer(nn.Module): def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer, self).__init__() self.in_shape = in_shape",
"\"\"\" for param in self.parameters(): if param.requires_grad: yield param class TaskLinearLayer(nn.Module): def __init__(self,",
"= conv3x3(hidden_size, hidden_size) for j in range(task_conv-1): self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus())) self.classifier",
"__init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork, self).__init__() self.in_channels = in_channels self.out_features = out_features self.hidden_size",
"logits = self.classifier(inputs) return logits def trainable_parameters(self): \"\"\" Returns an iterator over the",
"super(FullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size '''",
"inputs.view((inputs.size(0), -1)) logits = self.fc_net(inputs) return logits def trainable_parameters(self): \"\"\" Returns an iterator",
"hidden_size assert task_conv >= 0, \"Wrong call for task nets!\" self.features = conv3x3(in_channels,",
"super(TaskFullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size '''",
"self.parameters(): if param.requires_grad: yield param class TaskLinearLayer(nn.Module): def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer, self).__init__()",
"param.requires_grad: yield param class TaskFullyConnectedLayer(nn.Module): def __init__(self,num_layer=1, task_conv=0): super(TaskFullyConnectedLayer, self).__init__() ''' self.classifier =",
"= nn.Linear(64,1) self.classifier = nn.Sequential(nn.Linear(64,1)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1))",
"model. \"\"\" for param in self.parameters(): if param.requires_grad: yield param class TaskLinearLayer(nn.Module): def",
"for param in self.parameters(): if param.requires_grad: yield param class TaskFullyConnectedLayer(nn.Module): def __init__(self,num_layer=1, task_conv=0):",
"FullyConnectedLayer(nn.Module): def __init__(self, num_layer=2): super(FullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features))",
"nn.BatchNorm2d(out_channels, momentum=1., affine=True, track_running_stats=False # When this is true is called the \"transductive",
"the model. \"\"\" for param in self.parameters(): if param.requires_grad: yield param class TaskLinearLayer(nn.Module):",
"self.classifier = conv3x3(hidden_size, hidden_size) for j in range(task_conv-1): self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus()))",
"nn.Linear(128, out_features)) else: self.classifier = conv3x3(hidden_size, hidden_size) for j in range(task_conv-1): self.classifier =",
"TaskFullyConnectedLayer(nn.Module): def __init__(self,num_layer=1, task_conv=0): super(TaskFullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features))",
"if task_conv ==0 and not dfc: self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) elif",
"= nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus())) self.classifier = nn.Sequential(self.classifier, nn.Flatten(), nn.Linear(in_shape, out_features)) def forward(self, inputs,",
"param class ConvolutionalNeuralNetwork(nn.Module): def __init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork, self).__init__() self.in_channels = in_channels",
"super(ConvolutionalNeuralNetwork, self).__init__() self.in_channels = in_channels self.out_features = out_features self.hidden_size = hidden_size assert task_conv",
"i in range(3-task_conv): self.features = nn.Sequential(self.features, conv3x3(hidden_size, hidden_size)) def forward(self, inputs, params=None): features",
"nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) ) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.fc_net(inputs)",
"for i in range(3-task_conv): self.features = nn.Sequential(self.features, conv3x3(hidden_size, hidden_size)) def forward(self, inputs, params=None):",
"= nn.Sequential(self.features, conv3x3(hidden_size, hidden_size)) def forward(self, inputs, params=None): features = self.features(inputs) return features",
"self.hidden_size ''' self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for j in range(num_layer-1): self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)",
"for param in self.parameters(): if param.requires_grad: yield param class ConvolutionalNeuralNetwork(nn.Module): def __init__(self, in_channels,",
"self.hidden_size = hidden_size assert task_conv >= 0, \"Wrong call for task nets!\" self.features",
"TaskLinearLayer(nn.Module): def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer, self).__init__() self.in_shape = in_shape self.out_features = out_features",
"logger = logging.getLogger(__name__) def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)): return nn.Sequential( nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.MaxPool2d(2),",
"= nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) elif dfc: self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128,",
"nn.Flatten(), nn.Linear(in_shape, out_features)) elif dfc: self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features)) else:",
"nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) elif dfc: self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features))",
"= nn.Sequential(self.classifier, nn.Flatten(), nn.Linear(in_shape, out_features)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1))",
"features def trainable_parameters(self): \"\"\" Returns an iterator over the trainable parameters of the",
"params=None): features = self.features(inputs) return features def trainable_parameters(self): \"\"\" Returns an iterator over",
"__init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer, self).__init__() self.in_shape = in_shape self.out_features = out_features if task_conv",
"else: self.classifier = conv3x3(hidden_size, hidden_size) for j in range(task_conv-1): self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size,",
"self).__init__() self.in_channels = in_channels self.out_features = out_features self.hidden_size = hidden_size assert task_conv >=",
"activation ) class FullyConnectedLayer(nn.Module): def __init__(self, num_layer=2): super(FullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential(",
"track_running_stats=False # When this is true is called the \"transductive setting\" ), activation",
"elif dfc: self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features)) else: self.classifier = conv3x3(hidden_size,",
"nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus())) self.classifier = nn.Sequential(self.classifier, nn.Flatten(), nn.Linear(in_shape, out_features)) def forward(self, inputs, params=None):",
"call for task nets!\" self.features = conv3x3(in_channels, hidden_size) for i in range(3-task_conv): self.features",
"in_shape, out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer, self).__init__() self.in_shape = in_shape self.out_features = out_features if task_conv ==0",
"self.features = conv3x3(in_channels, hidden_size) for i in range(3-task_conv): self.features = nn.Sequential(self.features, conv3x3(hidden_size, hidden_size))",
"= logging.getLogger(__name__) def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)): return nn.Sequential( nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.MaxPool2d(2), nn.BatchNorm2d(out_channels,",
"logits = self.fc_net(inputs) return logits def trainable_parameters(self): \"\"\" Returns an iterator over the",
"true is called the \"transductive setting\" ), activation ) class FullyConnectedLayer(nn.Module): def __init__(self,",
"= nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for",
"class TaskLinearLayer(nn.Module): def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer, self).__init__() self.in_shape = in_shape self.out_features =",
"def forward(self, inputs, params=None): features = self.features(inputs) return features def trainable_parameters(self): \"\"\" Returns",
"3, padding=1), nn.MaxPool2d(2), nn.BatchNorm2d(out_channels, momentum=1., affine=True, track_running_stats=False # When this is true is",
"affine=True, track_running_stats=False # When this is true is called the \"transductive setting\" ),",
"nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features)) else: self.classifier = conv3x3(hidden_size, hidden_size) for j in",
"nets!\" self.features = conv3x3(in_channels, hidden_size) for i in range(3-task_conv): self.features = nn.Sequential(self.features, conv3x3(hidden_size,",
"in self.parameters(): if param.requires_grad: yield param class ConvolutionalNeuralNetwork(nn.Module): def __init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0):",
"the trainable parameters of the model. \"\"\" for param in self.parameters(): if param.requires_grad:",
"param in self.parameters(): if param.requires_grad: yield param class TaskFullyConnectedLayer(nn.Module): def __init__(self,num_layer=1, task_conv=0): super(TaskFullyConnectedLayer,",
"out_channels, 3, padding=1), nn.MaxPool2d(2), nn.BatchNorm2d(out_channels, momentum=1., affine=True, track_running_stats=False # When this is true",
"nn import logging logger = logging.getLogger(__name__) def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)): return nn.Sequential( nn.Conv2d(in_channels, out_channels,",
"self.out_features = out_features self.hidden_size = hidden_size assert task_conv >= 0, \"Wrong call for",
"range(num_layer-1): self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) ) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0),",
"nn.Sequential( nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.MaxPool2d(2), nn.BatchNorm2d(out_channels, momentum=1., affine=True, track_running_stats=False # When this",
"param class TaskLinearLayer(nn.Module): def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer, self).__init__() self.in_shape = in_shape self.out_features",
"-1)) logits = self.fc_net(inputs) return logits def trainable_parameters(self): \"\"\" Returns an iterator over",
") class FullyConnectedLayer(nn.Module): def __init__(self, num_layer=2): super(FullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(),",
"the model. \"\"\" for param in self.parameters(): if param.requires_grad: yield param class TaskFullyConnectedLayer(nn.Module):",
"dfc: self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) elif dfc: self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()),",
"self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features)) else: self.classifier = conv3x3(hidden_size, hidden_size) for",
"self.classifier = nn.Sequential(nn.Linear(64,1)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits =",
"features = self.features(inputs) return features def trainable_parameters(self): \"\"\" Returns an iterator over the",
"dfc: self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features)) else: self.classifier = conv3x3(hidden_size, hidden_size)",
"self.parameters(): if param.requires_grad: yield param class ConvolutionalNeuralNetwork(nn.Module): def __init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork,",
"if param.requires_grad: yield param class TaskFullyConnectedLayer(nn.Module): def __init__(self,num_layer=1, task_conv=0): super(TaskFullyConnectedLayer, self).__init__() ''' self.classifier",
"in_channels, out_features, hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork, self).__init__() self.in_channels = in_channels self.out_features = out_features self.hidden_size =",
"= conv3x3(in_channels, hidden_size) for i in range(3-task_conv): self.features = nn.Sequential(self.features, conv3x3(hidden_size, hidden_size)) def",
"nn.Sequential(self.features, conv3x3(hidden_size, hidden_size)) def forward(self, inputs, params=None): features = self.features(inputs) return features def",
"in self.parameters(): if param.requires_grad: yield param class TaskFullyConnectedLayer(nn.Module): def __init__(self,num_layer=1, task_conv=0): super(TaskFullyConnectedLayer, self).__init__()",
"class ConvolutionalNeuralNetwork(nn.Module): def __init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork, self).__init__() self.in_channels = in_channels self.out_features",
"out_features)) self.hidden_size = self.hidden_size ''' self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for j in range(num_layer-1): self.fc_net",
"out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer, self).__init__() self.in_shape = in_shape self.out_features = out_features if task_conv ==0 and",
"hidden_size)) def forward(self, inputs, params=None): features = self.features(inputs) return features def trainable_parameters(self): \"\"\"",
"for task nets!\" self.features = conv3x3(in_channels, hidden_size) for i in range(3-task_conv): self.features =",
"= nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) ) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits",
"forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.fc_net(inputs) return logits def",
"logging.getLogger(__name__) def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)): return nn.Sequential( nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.MaxPool2d(2), nn.BatchNorm2d(out_channels, momentum=1.,",
"0, \"Wrong call for task nets!\" self.features = conv3x3(in_channels, hidden_size) for i in",
"nn.Sequential(self.classifier, nn.Flatten(), nn.Linear(in_shape, out_features)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits",
") def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.fc_net(inputs) return",
"self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) ) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1))",
"), activation ) class FullyConnectedLayer(nn.Module): def __init__(self, num_layer=2): super(FullyConnectedLayer, self).__init__() ''' self.classifier =",
"trainable parameters of the model. \"\"\" for param in self.parameters(): if param.requires_grad: yield",
"import logging logger = logging.getLogger(__name__) def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)): return nn.Sequential( nn.Conv2d(in_channels, out_channels, 3,",
"self.hidden_size = self.hidden_size ''' self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for j in range(num_layer-1): self.fc_net =",
"parameters of the model. \"\"\" for param in self.parameters(): if param.requires_grad: yield param",
"''' if num_layer>1: self.classifier = nn.Linear(64,1) self.classifier = nn.Sequential(nn.Linear(64,1)) def forward(self, inputs, params=None):",
"for param in self.parameters(): if param.requires_grad: yield param class TaskLinearLayer(nn.Module): def __init__(self, in_shape,",
"= inputs.view((inputs.size(0), -1)) logits = self.classifier(inputs) return logits def trainable_parameters(self): \"\"\" Returns an",
"= self.features(inputs) return features def trainable_parameters(self): \"\"\" Returns an iterator over the trainable",
"inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.classifier(inputs) return logits def trainable_parameters(self):",
"conv3x3(in_channels, hidden_size) for i in range(3-task_conv): self.features = nn.Sequential(self.features, conv3x3(hidden_size, hidden_size)) def forward(self,",
"conv3x3(hidden_size, hidden_size)) def forward(self, inputs, params=None): features = self.features(inputs) return features def trainable_parameters(self):",
"self.features = nn.Sequential(self.features, conv3x3(hidden_size, hidden_size)) def forward(self, inputs, params=None): features = self.features(inputs) return",
"self.classifier = nn.Linear(64,1) self.classifier = nn.Sequential(nn.Linear(64,1)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0),",
"class FullyConnectedLayer(nn.Module): def __init__(self, num_layer=2): super(FullyConnectedLayer, self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape,",
"assert task_conv >= 0, \"Wrong call for task nets!\" self.features = conv3x3(in_channels, hidden_size)",
"\"transductive setting\" ), activation ) class FullyConnectedLayer(nn.Module): def __init__(self, num_layer=2): super(FullyConnectedLayer, self).__init__() '''",
"in self.parameters(): if param.requires_grad: yield param class TaskLinearLayer(nn.Module): def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer,",
"\"\"\" for param in self.parameters(): if param.requires_grad: yield param class TaskFullyConnectedLayer(nn.Module): def __init__(self,num_layer=1,",
"is called the \"transductive setting\" ), activation ) class FullyConnectedLayer(nn.Module): def __init__(self, num_layer=2):",
"params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.classifier(inputs) return logits def trainable_parameters(self): \"\"\"",
"out_channels,activation=nn.ReLU(inplace=True)): return nn.Sequential( nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.MaxPool2d(2), nn.BatchNorm2d(out_channels, momentum=1., affine=True, track_running_stats=False #",
"in_channels self.out_features = out_features self.hidden_size = hidden_size assert task_conv >= 0, \"Wrong call",
"= nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for j in range(num_layer-1): self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) ) def forward(self,",
"return logits def trainable_parameters(self): \"\"\" Returns an iterator over the trainable parameters of",
"self.in_channels = in_channels self.out_features = out_features self.hidden_size = hidden_size assert task_conv >= 0,",
"is true is called the \"transductive setting\" ), activation ) class FullyConnectedLayer(nn.Module): def",
"nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)) for j in range(num_layer-1):",
"task nets!\" self.features = conv3x3(in_channels, hidden_size) for i in range(3-task_conv): self.features = nn.Sequential(self.features,",
"in range(3-task_conv): self.features = nn.Sequential(self.features, conv3x3(hidden_size, hidden_size)) def forward(self, inputs, params=None): features =",
"super(TaskLinearLayer, self).__init__() self.in_shape = in_shape self.out_features = out_features if task_conv ==0 and not",
"in_shape self.out_features = out_features if task_conv ==0 and not dfc: self.classifier = nn.Sequential(",
"if param.requires_grad: yield param class ConvolutionalNeuralNetwork(nn.Module): def __init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork, self).__init__()",
"nn.MaxPool2d(2), nn.BatchNorm2d(out_channels, momentum=1., affine=True, track_running_stats=False # When this is true is called the",
"out_features)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.classifier(inputs) return",
"return features def trainable_parameters(self): \"\"\" Returns an iterator over the trainable parameters of",
"out_features, hidden_size=32,device=None,task_conv=0): super(ConvolutionalNeuralNetwork, self).__init__() self.in_channels = in_channels self.out_features = out_features self.hidden_size = hidden_size",
"nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64) ) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits =",
"self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' if num_layer>1: self.classifier",
"hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features)) else: self.classifier = conv3x3(hidden_size, hidden_size) for j in range(task_conv-1):",
"= nn.Sequential(nn.Linear(64,1)) def forward(self, inputs, params=None): #features = inputs.view((inputs.size(0), -1)) logits = self.classifier(inputs)",
"param.requires_grad: yield param class TaskLinearLayer(nn.Module): def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True): super(TaskLinearLayer, self).__init__() self.in_shape =",
">= 0, \"Wrong call for task nets!\" self.features = conv3x3(in_channels, hidden_size) for i",
"nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' if num_layer>1: self.classifier = nn.Linear(64,1)",
"self).__init__() ''' self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(in_shape, out_features)) self.hidden_size = self.hidden_size ''' if",
"trainable_parameters(self): \"\"\" Returns an iterator over the trainable parameters of the model. \"\"\""
] |
[
"If set will pad the sequence to a multiple of the provided value.",
":obj:`max_length` or to the maximum acceptable input length for the model if that",
"the targets under the argument :obj:`labels`. Check your model's documentation for all accepted",
"else: return DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name, ) else: return super()._get_train_sampler()",
":obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different",
"args, state, control, **kwargs): if state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps, smoothing=0.1) self.current_step = 0",
"inputs and labels since they have to be of different lenghts and need",
"load_metric(\"wer\") def compute_metrics(processor): def __call__(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids",
"# Build the sampler. if self.args.group_by_length: # lengths = self.train_dataset[self.length_field_name] if self.length_field_name is",
"Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to",
"LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass class DataCollatorCTCWithPadding: \"\"\" Data collator that will dynamically pad the",
"progress bar for better estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self, args, state, control, **kwargs):",
"methods input_features = [{\"input_values\": feature[\"input_values\"]} for feature in features] label_features = [{\"input_ids\": feature[\"labels\"]}",
"ValueError(f\"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']\") if self.args.gradient_accumulation_steps > 1:",
"def __init__(self, train_seq_lengths: List[int], *args, **kwargs): super().__init__(*args, **kwargs) self.train_seq_lengths = train_seq_lengths def _get_train_sampler(self)",
"transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass class DataCollatorCTCWithPadding: \"\"\" Data collator that will dynamically",
"collator that will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor",
"feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\", ) with",
"# lengths = self.train_dataset[self.length_field_name] if self.length_field_name is not None else None model_input_name =",
"processor.batch_decode(pred_ids) # we do not want to group tokens when computing the metrics",
"# solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer): # length_field_name should possibly be part of",
"loss.detach() # add less aggressive smoothing to progress bar for better estimate class",
"**kwargs): if state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps, smoothing=0.1) self.current_step = 0 # solution from",
"import tqdm from transformers import ( Trainer, Wav2Vec2Processor, ) from transformers.trainer_pt_utils import LengthGroupedSampler,",
"or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding",
"``labels`` returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If",
") with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\", ) #",
"elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() # add less aggressive smoothing to",
"to the longest sequence in the batch (or no padding if only a",
"Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). \"\"\" processor:",
"Any]]`): The inputs and targets of the model. The dictionary will be unpacked",
"1: if model.module.config.ctc_loss_reduction == \"mean\": loss = loss.mean() elif model.module.config.ctc_loss_reduction == \"sum\": loss",
"if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs)",
"The inputs and targets of the model. The dictionary will be unpacked before",
"(:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will",
"or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned",
"inputs and targets of the model. The dictionary will be unpacked before being",
"labels since they have to be of different lenghts and need # different",
"compute_metrics(processor): def __call__(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100]",
"self.train_seq_lengths = train_seq_lengths def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(",
"== -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to",
"to the model. Most models expect the targets under the argument :obj:`labels`. Check",
"No padding (i.e., can output a batch with sequences of different lengths). max_length",
"labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch[\"labels\"] = labels return batch class CTCTrainer(Trainer): def training_step(self,",
"not isinstance( self.train_dataset, collections.abc.Sized ): return None # Build the sampler. if self.args.group_by_length:",
"split inputs and labels since they have to be of different lenghts and",
"inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction ==",
"Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be",
"def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance( self.train_dataset, collections.abc.Sized ):",
"else None model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None if",
"Union import numpy as np import torch import transformers from audioengine.metrics.wer import Jiwer",
"inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: \"\"\" Perform a training step on a",
"and targets of the model. The dictionary will be unpacked before being fed",
"-> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance( self.train_dataset, collections.abc.Sized ): return None",
"7.5 (Volta). \"\"\" processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] =",
"pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int],",
"List, Optional, Union import numpy as np import torch import transformers from audioengine.metrics.wer",
"super()._get_train_sampler() wer_metric = load_metric(\"wer\") def compute_metrics(processor): def __call__(pred): pred_logits = pred.predictions pred_ids =",
"list and optionally padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length of",
"with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu",
"self.deepspeed.backward(loss) else: loss.backward() return loss.detach() # add less aggressive smoothing to progress bar",
">= 0).sum() else: raise ValueError(f\"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']\")",
"import autocast from tqdm import tqdm from transformers import ( Trainer, Wav2Vec2Processor, )",
"None # Build the sampler. if self.args.group_by_length: # lengths = self.train_dataset[self.length_field_name] if self.length_field_name",
"input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding,",
"the model. The dictionary will be unpacked before being fed to the model.",
"numpy as np import torch import transformers from audioengine.metrics.wer import Jiwer from datasets",
"= self.train_dataset[self.length_field_name] if self.length_field_name is not None else None model_input_name = self.tokenizer.model_input_names[0] if",
"Choose one of ['mean', 'sum']\") if self.args.gradient_accumulation_steps > 1: loss = loss /",
"a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of",
"single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with",
"length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels`` returned list",
"model_input_name=model_input_name, ) else: return super()._get_train_sampler() wer_metric = load_metric(\"wer\") def compute_metrics(processor): def __call__(pred): pred_logits",
"import nn from torch.cuda.amp import autocast from tqdm import tqdm from transformers import",
"return loss.detach() # add less aggressive smoothing to progress bar for better estimate",
"for better estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self, args, state, control, **kwargs): if state.is_local_process_zero:",
"from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass class DataCollatorCTCWithPadding: \"\"\" Data collator that will",
"= [{\"input_ids\": feature[\"labels\"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length,",
"part of TrainingArguments instead def __init__(self, train_seq_lengths: List[int], *args, **kwargs): super().__init__(*args, **kwargs) self.train_seq_lengths",
"one of ['mean', 'sum']\") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps",
"import transformers from audioengine.metrics.wer import Jiwer from datasets import load_metric from torch import",
"(i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`):",
"or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of",
"of different lenghts and need # different padding methods input_features = [{\"input_values\": feature[\"input_values\"]}",
"not valid. Choose one of ['mean', 'sum']\") if self.args.gradient_accumulation_steps > 1: loss =",
"unpacked before being fed to the model. Most models expect the targets under",
"autocast from tqdm import tqdm from transformers import ( Trainer, Wav2Vec2Processor, ) from",
"= None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split",
"None else None model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None",
"training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: \"\"\" Perform a training",
"if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument",
"we do not want to group tokens when computing the metrics label_str =",
"step on a batch of inputs. Subclass and override to inject custom behavior.",
"Optional, Union import numpy as np import torch import transformers from audioengine.metrics.wer import",
"max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\", ) # replace padding with -100 to ignore loss correctly",
"maximum acceptable input length for the model if that argument is not provided.",
"Maximum length of the ``input_values`` of the returned list and optionally padding length",
"if self.use_amp: self.scaler.scale(loss).backward() # elif self.use_apex: # with amp.scale_loss(loss, self.optimizer) as scaled_loss: #",
"the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data.",
"lenghts and need # different padding methods input_features = [{\"input_values\": feature[\"input_values\"]} for feature",
"None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str,",
"not want to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False)",
"self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name, ) else: return super()._get_train_sampler() wer_metric = load_metric(\"wer\") def",
":obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if",
"not None else None model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else",
"*args, **kwargs): super().__init__(*args, **kwargs) self.train_seq_lengths = train_seq_lengths def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset,",
"hardware with compute capability >= 7.5 (Volta). \"\"\" processor: Wav2Vec2Processor padding: Union[bool, str]",
"pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the",
"(:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`,",
"a strategy to pad the returned sequences (according to the model's padding side",
"fed to the model. Most models expect the targets under the argument :obj:`labels`.",
"the batch (or no padding if only a single sequence if provided). *",
"super().__init__(*args, **kwargs) self.train_seq_lengths = train_seq_lengths def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or",
"in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\", ) with self.processor.as_target_processor():",
"return_tensors=\"pt\", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\", )",
"(:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally",
"= labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch[\"labels\"] = labels return batch class CTCTrainer(Trainer): def training_step(self, model:",
"-100) batch[\"labels\"] = labels return batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs:",
"of the ``input_values`` of the returned list and optionally padding length (see above).",
"the ``input_values`` of the returned list and optionally padding length (see above). max_length_labels",
"from dataclasses import dataclass from typing import Any, Dict, List, Optional, Union import",
"self.training_bar = tqdm(total=state.max_steps, smoothing=0.1) self.current_step = 0 # solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer):",
"batch[\"labels\"] = labels return batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str,",
"of TrainingArguments instead def __init__(self, train_seq_lengths: List[int], *args, **kwargs): super().__init__(*args, **kwargs) self.train_seq_lengths =",
"Build the sampler. if self.args.group_by_length: # lengths = self.train_dataset[self.length_field_name] if self.length_field_name is not",
"want to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer",
"LengthGroupedSampler( self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name ) else: return DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index,",
"be of different lenghts and need # different padding methods input_features = [{\"input_values\":",
"`optional`): Maximum length of the ``labels`` returned list and optionally padding length (see",
"\"\"\" Data collator that will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`)",
"collections from dataclasses import dataclass from typing import Any, Dict, List, Optional, Union",
"Perform a training step on a batch of inputs. Subclass and override to",
"num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name, ) else: return super()._get_train_sampler() wer_metric = load_metric(\"wer\") def compute_metrics(processor):",
"when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return",
"longest sequence in the batch (or no padding if only a single sequence",
"of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the",
"TrainingArguments instead def __init__(self, train_seq_lengths: List[int], *args, **kwargs): super().__init__(*args, **kwargs) self.train_seq_lengths = train_seq_lengths",
"typing import Any, Dict, List, Optional, Union import numpy as np import torch",
"(inputs[\"labels\"] >= 0).sum() else: raise ValueError(f\"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean',",
"loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1:",
"custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):",
"if self.args.world_size <= 1: return LengthGroupedSampler( self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name ) else: return",
"Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). \"\"\" processor: Wav2Vec2Processor",
"and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train.",
"of the provided value. This is especially useful to enable the use of",
"This is especially useful to enable the use of Tensor Cores on NVIDIA",
"amp.scale_loss(loss, self.optimizer) as scaled_loss: # scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach()",
"since they have to be of different lenghts and need # different padding",
"max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels:",
"a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified",
"models expect the targets under the argument :obj:`labels`. Check your model's documentation for",
"model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to",
"group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str,",
"train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The",
"that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e.,",
"= loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() # elif self.use_apex: # with amp.scale_loss(loss,",
"pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str",
"on NVIDIA hardware with compute capability >= 7.5 (Volta). \"\"\" processor: Wav2Vec2Processor padding:",
"accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. \"\"\"",
"is not None else None model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None",
"max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels,",
"torch.utils.data.IterableDataset) or not isinstance( self.train_dataset, collections.abc.Sized ): return None # Build the sampler.",
"multiple of the provided value. This is especially useful to enable the use",
"the sequence to a multiple of the provided value. This is especially useful",
") # replace padding with -100 to ignore loss correctly labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1),",
"correctly labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch[\"labels\"] = labels return batch class CTCTrainer(Trainer): def",
"self.length_field_name is not None else None model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not",
"scaled_loss: # scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() # add less",
"if self.args.group_by_length: # lengths = self.train_dataset[self.length_field_name] if self.length_field_name is not None else None",
"rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name, ) else: return super()._get_train_sampler() wer_metric = load_metric(\"wer\") def compute_metrics(processor): def",
"return None # Build the sampler. if self.args.group_by_length: # lengths = self.train_dataset[self.length_field_name] if",
"import Any, Dict, List, Optional, Union import numpy as np import torch import",
"else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == \"mean\":",
"enable the use of Tensor Cores on NVIDIA hardware with compute capability >=",
"-100 to ignore loss correctly labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch[\"labels\"] = labels return",
"length_field_name should possibly be part of TrainingArguments instead def __init__(self, train_seq_lengths: List[int], *args,",
"pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) #",
"# different padding methods input_features = [{\"input_values\": feature[\"input_values\"]} for feature in features] label_features",
"the sampler. if self.args.group_by_length: # lengths = self.train_dataset[self.length_field_name] if self.length_field_name is not None",
"= loss.sum() / (inputs[\"labels\"] >= 0).sum() else: raise ValueError(f\"{model.config.ctc_loss_reduction} is not valid. Choose",
"labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch[\"labels\"] = labels return batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module,",
"return LengthGroupedSampler( self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name ) else: return DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size,",
"the returned sequences (according to the model's padding side and padding index) among:",
"the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`:",
"None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None",
":obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to",
"processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] = None max_length_labels: Optional[int]",
"_get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance( self.train_dataset, collections.abc.Sized ): return",
"= self.tokenizer.model_input_names[0] if self.tokenizer is not None else None if self.args.world_size <= 1:",
"(:obj:`int`, `optional`): If set will pad the sequence to a multiple of the",
"= None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] =",
"self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() # elif self.use_apex: # with amp.scale_loss(loss, self.optimizer) as scaled_loss:",
"dictionary will be unpacked before being fed to the model. Most models expect",
"with compute capability >= 7.5 (Volta). \"\"\" processor: Wav2Vec2Processor padding: Union[bool, str] =",
"from audioengine.metrics.wer import Jiwer from datasets import load_metric from torch import nn from",
"* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or",
"and need # different padding methods input_features = [{\"input_values\": feature[\"input_values\"]} for feature in",
"* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with",
"sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of",
"to a multiple of the provided value. This is especially useful to enable",
"as scaled_loss: # scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() # add",
"https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer): # length_field_name should possibly be part of TrainingArguments instead def",
"torch.cuda.amp import autocast from tqdm import tqdm from transformers import ( Trainer, Wav2Vec2Processor,",
"above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple",
"None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str,",
"import dataclass from typing import Any, Dict, List, Optional, Union import numpy as",
"else: loss.backward() return loss.detach() # add less aggressive smoothing to progress bar for",
"True max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None",
"the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a",
"dataclasses import dataclass from typing import Any, Dict, List, Optional, Union import numpy",
"a multiple of the provided value. This is especially useful to enable the",
"instead def __init__(self, train_seq_lengths: List[int], *args, **kwargs): super().__init__(*args, **kwargs) self.train_seq_lengths = train_seq_lengths def",
"= self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: if",
"batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the",
"input length for the model if that argument is not provided. * :obj:`False`",
"max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and",
"a batch of inputs. Subclass and override to inject custom behavior. Args: model",
"state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps, smoothing=0.1) self.current_step = 0 # solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class",
"self.train_dataset[self.length_field_name] if self.length_field_name is not None else None model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer",
"padding: Union[bool, str] = True max_length: Optional[int] = None max_length_labels: Optional[int] = None",
"should possibly be part of TrainingArguments instead def __init__(self, train_seq_lengths: List[int], *args, **kwargs):",
"= tqdm(total=state.max_steps, smoothing=0.1) self.current_step = 0 # solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer): #",
"and labels since they have to be of different lenghts and need #",
"# scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() # add less aggressive",
"Jiwer from datasets import load_metric from torch import nn from torch.cuda.amp import autocast",
"https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb#scrollTo=lbQf5GuZyQ4_ import collections from dataclasses import dataclass from typing import Any, Dict, List,",
"can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum",
"loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() # elif self.use_apex: # with amp.scale_loss(loss, self.optimizer)",
"= self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss =",
"the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5",
"-100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to group",
"to pad the returned sequences (according to the model's padding side and padding",
"model.module.config.ctc_loss_reduction == \"sum\": loss = loss.sum() / (inputs[\"labels\"] >= 0).sum() else: raise ValueError(f\"{model.config.ctc_loss_reduction}",
"bar for better estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self, args, state, control, **kwargs): if",
"__call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels",
"None else None if self.args.world_size <= 1: return LengthGroupedSampler( self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name",
"model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else:",
"strategy to pad the returned sequences (according to the model's padding side and",
"torch import transformers from audioengine.metrics.wer import Jiwer from datasets import load_metric from torch",
"and optionally padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length of the",
"__init__(self, train_seq_lengths: List[int], *args, **kwargs): super().__init__(*args, **kwargs) self.train_seq_lengths = train_seq_lengths def _get_train_sampler(self) ->",
"output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length",
"length of the ``input_values`` of the returned list and optionally padding length (see",
"Any, Dict, List, Optional, Union import numpy as np import torch import transformers",
"str] = True max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int]",
"if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum",
"self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\", ) # replace padding",
"= self.compute_loss(model, inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == \"mean\": loss =",
"will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for",
"use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).",
"inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == \"mean\": loss = loss.mean() elif",
"Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] = None max_length_labels: Optional[int] =",
"from torch.cuda.amp import autocast from tqdm import tqdm from transformers import ( Trainer,",
"returned sequences (according to the model's padding side and padding index) among: *",
"of ['mean', 'sum']\") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if",
"not None else None if self.args.world_size <= 1: return LengthGroupedSampler( self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths,",
"training loss on this batch. \"\"\" model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with",
"Data collator that will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The",
"the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default):",
"from transformers import ( Trainer, Wav2Vec2Processor, ) from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass",
"will be unpacked before being fed to the model. Most models expect the",
"import load_metric from torch import nn from torch.cuda.amp import autocast from tqdm import",
"Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int]",
"= train_seq_lengths def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance( self.train_dataset,",
"1: return LengthGroupedSampler( self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name ) else: return DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size,",
"label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\", ) # replace padding with -100 to ignore",
"smoothing=0.1) self.current_step = 0 # solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer): # length_field_name should",
"loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == \"mean\": loss",
"Union[bool, str] = True max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of:",
"Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: #",
"List[int], *args, **kwargs): super().__init__(*args, **kwargs) self.train_seq_lengths = train_seq_lengths def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if",
"to the maximum acceptable input length for the model if that argument is",
"train_seq_lengths: List[int], *args, **kwargs): super().__init__(*args, **kwargs) self.train_seq_lengths = train_seq_lengths def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:",
"The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`,",
"self.use_amp: self.scaler.scale(loss).backward() # elif self.use_apex: # with amp.scale_loss(loss, self.optimizer) as scaled_loss: # scaled_loss.backward()",
"GroupedLengthsTrainer(CTCTrainer): # length_field_name should possibly be part of TrainingArguments instead def __init__(self, train_seq_lengths:",
"= labels return batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor,",
"batch of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`):",
"from tqdm import tqdm from transformers import ( Trainer, Wav2Vec2Processor, ) from transformers.trainer_pt_utils",
"a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable",
"import LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass class DataCollatorCTCWithPadding: \"\"\" Data collator that will dynamically pad",
"arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. \"\"\" model.train()",
"expect the targets under the argument :obj:`labels`. Check your model's documentation for all",
"class DataCollatorCTCWithPadding: \"\"\" Data collator that will dynamically pad the inputs received. Args:",
"to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer =",
"different lenghts and need # different padding methods input_features = [{\"input_values\": feature[\"input_values\"]} for",
"training step on a batch of inputs. Subclass and override to inject custom",
"documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on",
"pad the returned sequences (according to the model's padding side and padding index)",
"def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and",
"the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return {\"wer\": wer}",
"especially useful to enable the use of Tensor Cores on NVIDIA hardware with",
"Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str`",
"(or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad",
"if model.module.config.ctc_loss_reduction == \"mean\": loss = loss.mean() elif model.module.config.ctc_loss_reduction == \"sum\": loss =",
"be part of TrainingArguments instead def __init__(self, train_seq_lengths: List[int], *args, **kwargs): super().__init__(*args, **kwargs)",
"provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch",
":class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences",
"the model. Most models expect the targets under the argument :obj:`labels`. Check your",
"# replace padding with -100 to ignore loss correctly labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100)",
"need # different padding methods input_features = [{\"input_values\": feature[\"input_values\"]} for feature in features]",
"different padding methods input_features = [{\"input_values\": feature[\"input_values\"]} for feature in features] label_features =",
"of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). \"\"\"",
"= pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str =",
"loss correctly labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch[\"labels\"] = labels return batch class CTCTrainer(Trainer):",
"maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input",
"lengths = self.train_dataset[self.length_field_name] if self.length_field_name is not None else None model_input_name = self.tokenizer.model_input_names[0]",
"The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of",
"1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() # elif self.use_apex: #",
"# split inputs and labels since they have to be of different lenghts",
"model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No",
"return super()._get_train_sampler() wer_metric = load_metric(\"wer\") def compute_metrics(processor): def __call__(pred): pred_logits = pred.predictions pred_ids",
"if self.tokenizer is not None else None if self.args.world_size <= 1: return LengthGroupedSampler(",
"model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: \"\"\" Perform a training step",
"as np import torch import transformers from audioengine.metrics.wer import Jiwer from datasets import",
"control, **kwargs): if state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps, smoothing=0.1) self.current_step = 0 # solution",
"return batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) ->",
"# length_field_name should possibly be part of TrainingArguments instead def __init__(self, train_seq_lengths: List[int],",
"feature[\"input_values\"]} for feature in features] label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features]",
"ignore loss correctly labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch[\"labels\"] = labels return batch class",
"self.scaler.scale(loss).backward() # elif self.use_apex: # with amp.scale_loss(loss, self.optimizer) as scaled_loss: # scaled_loss.backward() elif",
"padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels,",
"loss on this batch. \"\"\" model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast():",
"override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs",
"None model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None if self.args.world_size",
"class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self, args, state, control, **kwargs): if state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps,",
"raise ValueError(f\"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']\") if self.args.gradient_accumulation_steps >",
"if state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps, smoothing=0.1) self.current_step = 0 # solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6",
"> 1: if model.module.config.ctc_loss_reduction == \"mean\": loss = loss.mean() elif model.module.config.ctc_loss_reduction == \"sum\":",
"index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the",
"or to the maximum acceptable input length for the model if that argument",
"from torch import nn from torch.cuda.amp import autocast from tqdm import tqdm from",
"to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model.",
"sequence to a multiple of the provided value. This is especially useful to",
"loss = loss.mean() elif model.module.config.ctc_loss_reduction == \"sum\": loss = loss.sum() / (inputs[\"labels\"] >=",
"transformers import ( Trainer, Wav2Vec2Processor, ) from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass class",
"model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the",
"from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer): # length_field_name should possibly be part of TrainingArguments instead",
"dataclass from typing import Any, Dict, List, Optional, Union import numpy as np",
"provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length`",
"the argument :obj:`max_length` or to the maximum acceptable input length for the model",
"(Volta). \"\"\" processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] = None",
"batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:",
"model. The dictionary will be unpacked before being fed to the model. Most",
"if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == \"mean\": loss = loss.mean() elif model.module.config.ctc_loss_reduction",
"if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding",
"length of the ``labels`` returned list and optionally padding length (see above). pad_to_multiple_of",
"= None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features:",
"for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'`",
"Wav2Vec2Processor, ) from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass class DataCollatorCTCWithPadding: \"\"\" Data collator",
"to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str,",
"model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss",
"self.use_apex: # with amp.scale_loss(loss, self.optimizer) as scaled_loss: # scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else:",
"\"sum\": loss = loss.sum() / (inputs[\"labels\"] >= 0).sum() else: raise ValueError(f\"{model.config.ctc_loss_reduction} is not",
"@dataclass class DataCollatorCTCWithPadding: \"\"\" Data collator that will dynamically pad the inputs received.",
") from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass class DataCollatorCTCWithPadding: \"\"\" Data collator that",
"being fed to the model. Most models expect the targets under the argument",
"to :obj:`True`): Select a strategy to pad the returned sequences (according to the",
"padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to",
"else None if self.args.world_size <= 1: return LengthGroupedSampler( self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name )",
":obj:`torch.Tensor`: The tensor with training loss on this batch. \"\"\" model.train() inputs =",
"pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids)",
"value. This is especially useful to enable the use of Tensor Cores on",
"else: raise ValueError(f\"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']\") if self.args.gradient_accumulation_steps",
"DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name, ) else: return super()._get_train_sampler() wer_metric =",
"class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: \"\"\"",
"a training step on a batch of inputs. Subclass and override to inject",
"model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None if self.args.world_size <=",
"nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: \"\"\" Perform a training step on",
"self.args.group_by_length: # lengths = self.train_dataset[self.length_field_name] if self.length_field_name is not None else None model_input_name",
"your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training",
"loss.mean() elif model.module.config.ctc_loss_reduction == \"sum\": loss = loss.sum() / (inputs[\"labels\"] >= 0).sum() else:",
":obj:`True`): Select a strategy to pad the returned sequences (according to the model's",
"lengths=self.train_seq_lengths, model_input_name=model_input_name ) else: return DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name, )",
"useful to enable the use of Tensor Cores on NVIDIA hardware with compute",
"proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select",
"sequences (according to the model's padding side and padding index) among: * :obj:`True`",
"transformers from audioengine.metrics.wer import Jiwer from datasets import load_metric from torch import nn",
"pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of",
"to enable the use of Tensor Cores on NVIDIA hardware with compute capability",
"= processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to group tokens",
"collections.abc.Sized ): return None # Build the sampler. if self.args.group_by_length: # lengths =",
"returned list and optionally padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length",
"self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if",
"= np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we",
"self.train_dataset, collections.abc.Sized ): return None # Build the sampler. if self.args.group_by_length: # lengths",
"side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest",
"DataCollatorCTCWithPadding: \"\"\" Data collator that will dynamically pad the inputs received. Args: processor",
"torch.Tensor]: # split inputs and labels since they have to be of different",
"computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return {\"wer\":",
"import Jiwer from datasets import load_metric from torch import nn from torch.cuda.amp import",
"features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\", ) with self.processor.as_target_processor(): labels_batch",
"Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: \"\"\" Perform a training step on a batch",
"= None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) ->",
"if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance( self.train_dataset, collections.abc.Sized ): return None # Build",
"tqdm from transformers import ( Trainer, Wav2Vec2Processor, ) from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler",
"processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults",
"in features] label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features] batch = self.processor.pad(",
"== \"mean\": loss = loss.mean() elif model.module.config.ctc_loss_reduction == \"sum\": loss = loss.sum() /",
"loss.sum() / (inputs[\"labels\"] >= 0).sum() else: raise ValueError(f\"{model.config.ctc_loss_reduction} is not valid. Choose one",
"capability >= 7.5 (Volta). \"\"\" processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length:",
"lengths=self.train_seq_lengths, model_input_name=model_input_name, ) else: return super()._get_train_sampler() wer_metric = load_metric(\"wer\") def compute_metrics(processor): def __call__(pred):",
"0).sum() else: raise ValueError(f\"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']\") if",
"data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy",
"<= 1: return LengthGroupedSampler( self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name ) else: return DistributedLengthGroupedSampler( self.train_dataset,",
"that will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used",
"for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):",
"pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:",
"model. Most models expect the targets under the argument :obj:`labels`. Check your model's",
"\"mean\": loss = loss.mean() elif model.module.config.ctc_loss_reduction == \"sum\": loss = loss.sum() / (inputs[\"labels\"]",
"is not None else None if self.args.world_size <= 1: return LengthGroupedSampler( self.train_dataset, self.args.train_batch_size,",
"length specified with the argument :obj:`max_length` or to the maximum acceptable input length",
">= 7.5 (Volta). \"\"\" processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int]",
"model_input_name=model_input_name ) else: return DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name, ) else:",
"length for the model if that argument is not provided. * :obj:`False` or",
"import numpy as np import torch import transformers from audioengine.metrics.wer import Jiwer from",
"NVIDIA hardware with compute capability >= 7.5 (Volta). \"\"\" processor: Wav2Vec2Processor padding: Union[bool,",
"under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return:",
"model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and",
"loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() # elif self.use_apex: # with",
"on_train_begin(self, args, state, control, **kwargs): if state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps, smoothing=0.1) self.current_step =",
"= processor.batch_decode(pred_ids) # we do not want to group tokens when computing the",
"tqdm(total=state.max_steps, smoothing=0.1) self.current_step = 0 # solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer): # length_field_name",
"self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features,",
"self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() # elif",
"replace padding with -100 to ignore loss correctly labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch[\"labels\"]",
"Trainer, Wav2Vec2Processor, ) from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass class DataCollatorCTCWithPadding: \"\"\" Data",
"with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\", ) # replace",
"import ( Trainer, Wav2Vec2Processor, ) from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass class DataCollatorCTCWithPadding:",
"tqdm import tqdm from transformers import ( Trainer, Wav2Vec2Processor, ) from transformers.trainer_pt_utils import",
"for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this",
"lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list",
"defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to",
"argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The",
"= 0 # solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer): # length_field_name should possibly be",
"to be of different lenghts and need # different padding methods input_features =",
"isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance( self.train_dataset, collections.abc.Sized ): return None # Build the",
"'sum']\") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward()",
"\"\"\" Perform a training step on a batch of inputs. Subclass and override",
"labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\", ) # replace padding with",
"-> torch.Tensor: \"\"\" Perform a training step on a batch of inputs. Subclass",
"`optional`): If set will pad the sequence to a multiple of the provided",
"tensor with training loss on this batch. \"\"\" model.train() inputs = self._prepare_inputs(inputs) if",
"acceptable input length for the model if that argument is not provided. *",
"argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can",
"= self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad(",
"Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]])",
"pad the sequence to a multiple of the provided value. This is especially",
"this batch. \"\"\" model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss =",
"state, control, **kwargs): if state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps, smoothing=0.1) self.current_step = 0 #",
"self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model,",
"in the batch (or no padding if only a single sequence if provided).",
"not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a",
"valid. Choose one of ['mean', 'sum']\") if self.args.gradient_accumulation_steps > 1: loss = loss",
"feature in features] label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features] batch =",
"import torch import transformers from audioengine.metrics.wer import Jiwer from datasets import load_metric from",
"== \"sum\": loss = loss.sum() / (inputs[\"labels\"] >= 0).sum() else: raise ValueError(f\"{model.config.ctc_loss_reduction} is",
"padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in",
"for feature in features] label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features] batch",
"torch.Tensor: \"\"\" Perform a training step on a batch of inputs. Subclass and",
"the returned list and optionally padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum",
"be unpacked before being fed to the model. Most models expect the targets",
"# Source: https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb#scrollTo=lbQf5GuZyQ4_ import collections from dataclasses import dataclass from typing import Any,",
"optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the",
"used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to",
"processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or",
"have to be of different lenghts and need # different padding methods input_features",
"[{\"input_ids\": feature[\"labels\"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of,",
"Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have",
"for feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\", )",
") else: return DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name, ) else: return",
"inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor,",
"padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a",
"Select a strategy to pad the returned sequences (according to the model's padding",
"labels return batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]])",
"optionally padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels``",
"the ``labels`` returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`):",
"/ (inputs[\"labels\"] >= 0).sum() else: raise ValueError(f\"{model.config.ctc_loss_reduction} is not valid. Choose one of",
"def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: \"\"\" Perform a",
"returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set",
"and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad",
"among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch",
"isinstance( self.train_dataset, collections.abc.Sized ): return None # Build the sampler. if self.args.group_by_length: #",
"input_features = [{\"input_values\": feature[\"input_values\"]} for feature in features] label_features = [{\"input_ids\": feature[\"labels\"]} for",
"pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\",",
"load_metric from torch import nn from torch.cuda.amp import autocast from tqdm import tqdm",
"label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding,",
"estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self, args, state, control, **kwargs): if state.is_local_process_zero: self.training_bar =",
"with -100 to ignore loss correctly labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch[\"labels\"] = labels",
"class GroupedLengthsTrainer(CTCTrainer): # length_field_name should possibly be part of TrainingArguments instead def __init__(self,",
"Pad to the longest sequence in the batch (or no padding if only",
"self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction",
"compute capability >= 7.5 (Volta). \"\"\" processor: Wav2Vec2Processor padding: Union[bool, str] = True",
"to a maximum length specified with the argument :obj:`max_length` or to the maximum",
"received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data. padding (:obj:`bool`,",
"`optional`): Maximum length of the ``input_values`` of the returned list and optionally padding",
"def __call__(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] =",
"elif model.module.config.ctc_loss_reduction == \"sum\": loss = loss.sum() / (inputs[\"labels\"] >= 0).sum() else: raise",
":obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences",
"Maximum length of the ``labels`` returned list and optionally padding length (see above).",
"pred_str = processor.batch_decode(pred_ids) # we do not want to group tokens when computing",
"and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence",
"np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do",
"`optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according",
"Most models expect the targets under the argument :obj:`labels`. Check your model's documentation",
"label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return {\"wer\": wer} return __call__",
"with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values``",
"of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The",
"inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss",
"the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad",
"to ignore loss correctly labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch[\"labels\"] = labels return batch",
"Source: https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb#scrollTo=lbQf5GuZyQ4_ import collections from dataclasses import dataclass from typing import Any, Dict,",
"(:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets",
"behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The",
":obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no",
"is not valid. Choose one of ['mean', 'sum']\") if self.args.gradient_accumulation_steps > 1: loss",
"sequence in the batch (or no padding if only a single sequence if",
"return_tensors=\"pt\", ) # replace padding with -100 to ignore loss correctly labels =",
"loss.backward() return loss.detach() # add less aggressive smoothing to progress bar for better",
"padding methods input_features = [{\"input_values\": feature[\"input_values\"]} for feature in features] label_features = [{\"input_ids\":",
"better estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self, args, state, control, **kwargs): if state.is_local_process_zero: self.training_bar",
"**kwargs): super().__init__(*args, **kwargs) self.train_seq_lengths = train_seq_lengths def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset)",
"\"\"\" processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] = None max_length_labels:",
"return DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name, ) else: return super()._get_train_sampler() wer_metric",
"torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to",
"to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`:",
"less aggressive smoothing to progress bar for better estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self,",
"else: return super()._get_train_sampler() wer_metric = load_metric(\"wer\") def compute_metrics(processor): def __call__(pred): pred_logits = pred.predictions",
"from datasets import load_metric from torch import nn from torch.cuda.amp import autocast from",
"with the argument :obj:`max_length` or to the maximum acceptable input length for the",
"audioengine.metrics.wer import Jiwer from datasets import load_metric from torch import nn from torch.cuda.amp",
"features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since",
"List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they",
"Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs",
"['mean', 'sum']\") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp:",
"The dictionary will be unpacked before being fed to the model. Most models",
"__call__(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id",
"self.tokenizer.model_input_names[0] if self.tokenizer is not None else None if self.args.world_size <= 1: return",
"model.module.config.ctc_loss_reduction == \"mean\": loss = loss.mean() elif model.module.config.ctc_loss_reduction == \"sum\": loss = loss.sum()",
"axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not",
"feature[\"labels\"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\",",
"with amp.scale_loss(loss, self.optimizer) as scaled_loss: # scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return",
"* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or",
"Pad to a maximum length specified with the argument :obj:`max_length` or to the",
"argument :obj:`max_length` or to the maximum acceptable input length for the model if",
"smoothing to progress bar for better estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self, args, state,",
"pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want",
"datasets import load_metric from torch import nn from torch.cuda.amp import autocast from tqdm",
"with training loss on this batch. \"\"\" model.train() inputs = self._prepare_inputs(inputs) if self.use_amp:",
"self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name, ) else: return super()._get_train_sampler() wer_metric = load_metric(\"wer\")",
"features] label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features] batch = self.processor.pad( input_features,",
"sampler. if self.args.group_by_length: # lengths = self.train_dataset[self.length_field_name] if self.length_field_name is not None else",
"padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence",
"= load_metric(\"wer\") def compute_metrics(processor): def __call__(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1)",
"padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels`` returned",
"# add less aggressive smoothing to progress bar for better estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback):",
"self.current_step = 0 # solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer): # length_field_name should possibly",
"def compute_metrics(processor): def __call__(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids ==",
"train_seq_lengths def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance( self.train_dataset, collections.abc.Sized",
"Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance( self.train_dataset, collections.abc.Sized ): return None #",
"nn from torch.cuda.amp import autocast from tqdm import tqdm from transformers import (",
"(according to the model's padding side and padding index) among: * :obj:`True` or",
"aggressive smoothing to progress bar for better estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self, args,",
"self.optimizer) as scaled_loss: # scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() #",
"the provided value. This is especially useful to enable the use of Tensor",
"): return None # Build the sampler. if self.args.group_by_length: # lengths = self.train_dataset[self.length_field_name]",
"# with amp.scale_loss(loss, self.optimizer) as scaled_loss: # scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward()",
"Dict, List, Optional, Union import numpy as np import torch import transformers from",
"padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`,",
"solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer): # length_field_name should possibly be part of TrainingArguments",
"processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to group tokens when",
"elif self.use_apex: # with amp.scale_loss(loss, self.optimizer) as scaled_loss: # scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss)",
"[{\"input_values\": feature[\"input_values\"]} for feature in features] label_features = [{\"input_ids\": feature[\"labels\"]} for feature in",
"tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str)",
"\"\"\" model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs)",
"> 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() # elif self.use_apex:",
"None if self.args.world_size <= 1: return LengthGroupedSampler( self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name ) else:",
"padding with -100 to ignore loss correctly labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch[\"labels\"] =",
"# elif self.use_apex: # with amp.scale_loss(loss, self.optimizer) as scaled_loss: # scaled_loss.backward() elif self.deepspeed:",
"targets of the model. The dictionary will be unpacked before being fed to",
"inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary",
"**kwargs) self.train_seq_lengths = train_seq_lengths def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not",
"provided value. This is especially useful to enable the use of Tensor Cores",
"``input_values`` of the returned list and optionally padding length (see above). max_length_labels (:obj:`int`,",
"# we do not want to group tokens when computing the metrics label_str",
"they have to be of different lenghts and need # different padding methods",
"Dict[str, torch.Tensor]: # split inputs and labels since they have to be of",
"sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the",
"padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the",
"None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs",
"on a batch of inputs. Subclass and override to inject custom behavior. Args:",
"np import torch import transformers from audioengine.metrics.wer import Jiwer from datasets import load_metric",
"specified with the argument :obj:`max_length` or to the maximum acceptable input length for",
"autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu >",
"possibly be part of TrainingArguments instead def __init__(self, train_seq_lengths: List[int], *args, **kwargs): super().__init__(*args,",
"of the returned list and optionally padding length (see above). max_length_labels (:obj:`int`, `optional`):",
"= loss.mean() elif model.module.config.ctc_loss_reduction == \"sum\": loss = loss.sum() / (inputs[\"labels\"] >= 0).sum()",
"above). max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels`` returned list and optionally",
"DistributedLengthGroupedSampler @dataclass class DataCollatorCTCWithPadding: \"\"\" Data collator that will dynamically pad the inputs",
"self.args.world_size <= 1: return LengthGroupedSampler( self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name ) else: return DistributedLengthGroupedSampler(",
"batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`:",
"(default): No padding (i.e., can output a batch with sequences of different lengths).",
"(see above). max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels`` returned list and",
"import collections from dataclasses import dataclass from typing import Any, Dict, List, Optional,",
"before being fed to the model. Most models expect the targets under the",
"padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\", ) # replace padding with -100 to ignore loss",
":obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor",
"(see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a",
"will pad the sequence to a multiple of the provided value. This is",
"is especially useful to enable the use of Tensor Cores on NVIDIA hardware",
"the longest sequence in the batch (or no padding if only a single",
"= self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\", ) # replace padding with -100",
"add less aggressive smoothing to progress bar for better estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def",
"of the ``labels`` returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`,",
"or not isinstance( self.train_dataset, collections.abc.Sized ): return None # Build the sampler. if",
"Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. \"\"\" model.train() inputs",
"dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing",
"/ self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() # elif self.use_apex: # with amp.scale_loss(loss, self.optimizer) as",
"metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return {\"wer\": wer} return",
"Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self,",
":obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the",
"different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned",
"inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model",
"only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length",
"length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to",
"set will pad the sequence to a multiple of the provided value. This",
"CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: \"\"\" Perform",
"wer_metric = load_metric(\"wer\") def compute_metrics(processor): def __call__(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits,",
"if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() #",
"( Trainer, Wav2Vec2Processor, ) from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass class DataCollatorCTCWithPadding: \"\"\"",
"torch import nn from torch.cuda.amp import autocast from tqdm import tqdm from transformers",
"all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch.",
"= True max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] =",
"(:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad",
"loss = loss.sum() / (inputs[\"labels\"] >= 0).sum() else: raise ValueError(f\"{model.config.ctc_loss_reduction} is not valid.",
"the maximum acceptable input length for the model if that argument is not",
"no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to",
"= [{\"input_values\": feature[\"input_values\"]} for feature in features] label_features = [{\"input_ids\": feature[\"labels\"]} for feature",
"max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def",
"self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() # add less aggressive smoothing to progress",
"of the model. The dictionary will be unpacked before being fed to the",
"targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments.",
"is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output",
"self.compute_loss(model, inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == \"mean\": loss = loss.mean()",
"max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels`` returned list and optionally padding",
"if self.length_field_name is not None else None model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is",
"def on_train_begin(self, args, state, control, **kwargs): if state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps, smoothing=0.1) self.current_step",
"from typing import Any, Dict, List, Optional, Union import numpy as np import",
"list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will",
"self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name ) else: return DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths,",
"batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=\"pt\", ) with self.processor.as_target_processor(): labels_batch =",
"self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == \"mean\": loss = loss.mean() elif model.module.config.ctc_loss_reduction ==",
"CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self, args, state, control, **kwargs): if state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps, smoothing=0.1)",
"self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\", ) # replace padding with -100 to",
"The tensor with training loss on this batch. \"\"\" model.train() inputs = self._prepare_inputs(inputs)",
"on this batch. \"\"\" model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss",
"-> Dict[str, torch.Tensor]: # split inputs and labels since they have to be",
"Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with",
"scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() # add less aggressive smoothing",
"do not want to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids,",
"to progress bar for better estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self, args, state, control,",
"batch. \"\"\" model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model,",
"self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name ) else: return DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name,",
"self.tokenizer is not None else None if self.args.world_size <= 1: return LengthGroupedSampler( self.train_dataset,",
") else: return super()._get_train_sampler() wer_metric = load_metric(\"wer\") def compute_metrics(processor): def __call__(pred): pred_logits =",
"Any]]) -> torch.Tensor: \"\"\" Perform a training step on a batch of inputs.",
"0 # solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer): # length_field_name should possibly be part",
"pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors=\"pt\", ) # replace padding with -100 to ignore loss correctly labels",
"Union[torch.Tensor, Any]]) -> torch.Tensor: \"\"\" Perform a training step on a batch of",
"inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data. padding",
"(:obj:`int`, `optional`): Maximum length of the ``labels`` returned list and optionally padding length"
] |
[
"[ # The home page path('', views.index, name='home'), # Matches any html file",
"path('catastal', views.catastal_list, name=\"catastal\"), path('admin-legal', views.admin_legal_list, name=\"admin-legal\"), path('admin-individual', views.admin_individual_list, name=\"admin-individual\"), path('edit-form/<str:table>/<int:id>', views.edit_form, name=\"edit-form\"), path('generate-contract/<int:id>',",
"path('admin-legal', views.admin_legal_list, name=\"admin-legal\"), path('admin-individual', views.admin_individual_list, name=\"admin-individual\"), path('edit-form/<str:table>/<int:id>', views.edit_form, name=\"edit-form\"), path('generate-contract/<int:id>', views.generate_contract, name=\"generate-contract\"), re_path(r'^.*\\.*',",
"path('admin-individual', views.admin_individual_list, name=\"admin-individual\"), path('edit-form/<str:table>/<int:id>', views.edit_form, name=\"edit-form\"), path('generate-contract/<int:id>', views.generate_contract, name=\"generate-contract\"), re_path(r'^.*\\.*', views.pages, name='pages') ]",
"re_path from apps.app import views urlpatterns = [ # The home page path('',",
"path('edit-table-data', views.save_table_data, name='edit-table-data'), path('condominium', views.condo_list, name=\"condominium\"), path('catastal', views.catastal_list, name=\"catastal\"), path('admin-legal', views.admin_legal_list, name=\"admin-legal\"), path('admin-individual',",
"\"\"\" Copyright (c) 2019 - present AppSeed.us \"\"\" from django.urls import path, re_path",
"- present AppSeed.us \"\"\" from django.urls import path, re_path from apps.app import views",
"path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'), path('search', views.search, name='search_results'), # Api views for editing tables path('edit-table-data',",
"for editing tables path('edit-table-data', views.save_table_data, name='edit-table-data'), path('condominium', views.condo_list, name=\"condominium\"), path('catastal', views.catastal_list, name=\"catastal\"), path('admin-legal',",
"\"\"\" from django.urls import path, re_path from apps.app import views urlpatterns = [",
"views.save_table_data, name='edit-table-data'), path('condominium', views.condo_list, name=\"condominium\"), path('catastal', views.catastal_list, name=\"catastal\"), path('admin-legal', views.admin_legal_list, name=\"admin-legal\"), path('admin-individual', views.admin_individual_list,",
"views.condo_list, name=\"condominium\"), path('catastal', views.catastal_list, name=\"catastal\"), path('admin-legal', views.admin_legal_list, name=\"admin-legal\"), path('admin-individual', views.admin_individual_list, name=\"admin-individual\"), path('edit-form/<str:table>/<int:id>', views.edit_form,",
"# The home page path('', views.index, name='home'), # Matches any html file path('bonus-faccata/',",
"AppSeed.us \"\"\" from django.urls import path, re_path from apps.app import views urlpatterns =",
"views.individual, name='individual'), path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'), path('search', views.search, name='search_results'), # Api views for editing",
"name=\"admin-legal\"), path('admin-individual', views.admin_individual_list, name=\"admin-individual\"), path('edit-form/<str:table>/<int:id>', views.edit_form, name=\"edit-form\"), path('generate-contract/<int:id>', views.generate_contract, name=\"generate-contract\"), re_path(r'^.*\\.*', views.pages, name='pages')",
"views.search, name='search_results'), # Api views for editing tables path('edit-table-data', views.save_table_data, name='edit-table-data'), path('condominium', views.condo_list,",
"import path, re_path from apps.app import views urlpatterns = [ # The home",
"apps.app import views urlpatterns = [ # The home page path('', views.index, name='home'),",
"name='catastal'), path('search', views.search, name='search_results'), # Api views for editing tables path('edit-table-data', views.save_table_data, name='edit-table-data'),",
"views.catastal, name='catastal'), path('search', views.search, name='search_results'), # Api views for editing tables path('edit-table-data', views.save_table_data,",
"editing tables path('edit-table-data', views.save_table_data, name='edit-table-data'), path('condominium', views.condo_list, name=\"condominium\"), path('catastal', views.catastal_list, name=\"catastal\"), path('admin-legal', views.admin_legal_list,",
"html file path('bonus-faccata/', views.bonus, name='bonus'), path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>', views.individual,",
"path, re_path from apps.app import views urlpatterns = [ # The home page",
"(c) 2019 - present AppSeed.us \"\"\" from django.urls import path, re_path from apps.app",
"file path('bonus-faccata/', views.bonus, name='bonus'), path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'),",
"path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'), path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'), path('search',",
"path('condominium', views.condo_list, name=\"condominium\"), path('catastal', views.catastal_list, name=\"catastal\"), path('admin-legal', views.admin_legal_list, name=\"admin-legal\"), path('admin-individual', views.admin_individual_list, name=\"admin-individual\"), path('edit-form/<str:table>/<int:id>',",
"name='home'), # Matches any html file path('bonus-faccata/', views.bonus, name='bonus'), path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>',",
"import views urlpatterns = [ # The home page path('', views.index, name='home'), #",
"tables path('edit-table-data', views.save_table_data, name='edit-table-data'), path('condominium', views.condo_list, name=\"condominium\"), path('catastal', views.catastal_list, name=\"catastal\"), path('admin-legal', views.admin_legal_list, name=\"admin-legal\"),",
"2019 - present AppSeed.us \"\"\" from django.urls import path, re_path from apps.app import",
"path('bonus-faccata/', views.bonus, name='bonus'), path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'), path('bonus-faccata/catastal<int:form>',",
"home page path('', views.index, name='home'), # Matches any html file path('bonus-faccata/', views.bonus, name='bonus'),",
"name=\"catastal\"), path('admin-legal', views.admin_legal_list, name=\"admin-legal\"), path('admin-individual', views.admin_individual_list, name=\"admin-individual\"), path('edit-form/<str:table>/<int:id>', views.edit_form, name=\"edit-form\"), path('generate-contract/<int:id>', views.generate_contract, name=\"generate-contract\"),",
"utf-8 -*- \"\"\" Copyright (c) 2019 - present AppSeed.us \"\"\" from django.urls import",
"from apps.app import views urlpatterns = [ # The home page path('', views.index,",
"Copyright (c) 2019 - present AppSeed.us \"\"\" from django.urls import path, re_path from",
"views.bonus, name='bonus'), path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'), path('bonus-faccata/catastal<int:form>', views.catastal,",
"present AppSeed.us \"\"\" from django.urls import path, re_path from apps.app import views urlpatterns",
"Matches any html file path('bonus-faccata/', views.bonus, name='bonus'), path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'),",
"views.catastal_list, name=\"catastal\"), path('admin-legal', views.admin_legal_list, name=\"admin-legal\"), path('admin-individual', views.admin_individual_list, name=\"admin-individual\"), path('edit-form/<str:table>/<int:id>', views.edit_form, name=\"edit-form\"), path('generate-contract/<int:id>', views.generate_contract,",
"name='individual'), path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'), path('search', views.search, name='search_results'), # Api views for editing tables",
"path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'), path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'), path('search', views.search, name='search_results'), # Api views for",
"The home page path('', views.index, name='home'), # Matches any html file path('bonus-faccata/', views.bonus,",
"views for editing tables path('edit-table-data', views.save_table_data, name='edit-table-data'), path('condominium', views.condo_list, name=\"condominium\"), path('catastal', views.catastal_list, name=\"catastal\"),",
"encoding: utf-8 -*- \"\"\" Copyright (c) 2019 - present AppSeed.us \"\"\" from django.urls",
"name='search_results'), # Api views for editing tables path('edit-table-data', views.save_table_data, name='edit-table-data'), path('condominium', views.condo_list, name=\"condominium\"),",
"# Api views for editing tables path('edit-table-data', views.save_table_data, name='edit-table-data'), path('condominium', views.condo_list, name=\"condominium\"), path('catastal',",
"name='edit-table-data'), path('condominium', views.condo_list, name=\"condominium\"), path('catastal', views.catastal_list, name=\"catastal\"), path('admin-legal', views.admin_legal_list, name=\"admin-legal\"), path('admin-individual', views.admin_individual_list, name=\"admin-individual\"),",
"page path('', views.index, name='home'), # Matches any html file path('bonus-faccata/', views.bonus, name='bonus'), path('bonus-faccata/<int:fff>',",
"django.urls import path, re_path from apps.app import views urlpatterns = [ # The",
"path('search', views.search, name='search_results'), # Api views for editing tables path('edit-table-data', views.save_table_data, name='edit-table-data'), path('condominium',",
"path('', views.index, name='home'), # Matches any html file path('bonus-faccata/', views.bonus, name='bonus'), path('bonus-faccata/<int:fff>', views.data_iniziali_view,",
"Api views for editing tables path('edit-table-data', views.save_table_data, name='edit-table-data'), path('condominium', views.condo_list, name=\"condominium\"), path('catastal', views.catastal_list,",
"name='bonus'), path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'), path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'),",
"name=\"condominium\"), path('catastal', views.catastal_list, name=\"catastal\"), path('admin-legal', views.admin_legal_list, name=\"admin-legal\"), path('admin-individual', views.admin_individual_list, name=\"admin-individual\"), path('edit-form/<str:table>/<int:id>', views.edit_form, name=\"edit-form\"),",
"# Matches any html file path('bonus-faccata/', views.bonus, name='bonus'), path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>', views.legal,",
"from django.urls import path, re_path from apps.app import views urlpatterns = [ #",
"= [ # The home page path('', views.index, name='home'), # Matches any html",
"-*- \"\"\" Copyright (c) 2019 - present AppSeed.us \"\"\" from django.urls import path,",
"views urlpatterns = [ # The home page path('', views.index, name='home'), # Matches",
"views.index, name='home'), # Matches any html file path('bonus-faccata/', views.bonus, name='bonus'), path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'),",
"any html file path('bonus-faccata/', views.bonus, name='bonus'), path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>',",
"# -*- encoding: utf-8 -*- \"\"\" Copyright (c) 2019 - present AppSeed.us \"\"\"",
"path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'), path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'), path('search', views.search, name='search_results'), #",
"name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'), path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'), path('search', views.search, name='search_results'),",
"name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'), path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'), path('search', views.search, name='search_results'), # Api views",
"views.data_iniziali_view, name='data-iniziali'), path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'), path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'), path('search', views.search,",
"-*- encoding: utf-8 -*- \"\"\" Copyright (c) 2019 - present AppSeed.us \"\"\" from",
"<reponame>MiloshBogdanovic/Auri-Soft # -*- encoding: utf-8 -*- \"\"\" Copyright (c) 2019 - present AppSeed.us",
"views.legal, name='legal'), path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'), path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'), path('search', views.search, name='search_results'), # Api",
"views.admin_legal_list, name=\"admin-legal\"), path('admin-individual', views.admin_individual_list, name=\"admin-individual\"), path('edit-form/<str:table>/<int:id>', views.edit_form, name=\"edit-form\"), path('generate-contract/<int:id>', views.generate_contract, name=\"generate-contract\"), re_path(r'^.*\\.*', views.pages,",
"urlpatterns = [ # The home page path('', views.index, name='home'), # Matches any"
] |
[
"while sexo not in 'MmFf': sexo = str(input('Dados invalidos Por favor digite novamente",
"= str(input('Informe seu sexo : ')).strip().upper()[0] while sexo not in 'MmFf': sexo =",
"sexo not in 'MmFf': sexo = str(input('Dados invalidos Por favor digite novamente :",
"<filename>Curso_Guanabara/aula57.py<gh_stars>1-10 sexo = str(input('Informe seu sexo : ')).strip().upper()[0] while sexo not in 'MmFf':",
"= str(input('Dados invalidos Por favor digite novamente : ')).strip().upper()[0] print('Sexo {} registrado com",
"str(input('Dados invalidos Por favor digite novamente : ')).strip().upper()[0] print('Sexo {} registrado com sucesso'.format(sexo))",
"'MmFf': sexo = str(input('Dados invalidos Por favor digite novamente : ')).strip().upper()[0] print('Sexo {}",
"sexo = str(input('Dados invalidos Por favor digite novamente : ')).strip().upper()[0] print('Sexo {} registrado",
"sexo = str(input('Informe seu sexo : ')).strip().upper()[0] while sexo not in 'MmFf': sexo",
": ')).strip().upper()[0] while sexo not in 'MmFf': sexo = str(input('Dados invalidos Por favor",
"')).strip().upper()[0] while sexo not in 'MmFf': sexo = str(input('Dados invalidos Por favor digite",
"str(input('Informe seu sexo : ')).strip().upper()[0] while sexo not in 'MmFf': sexo = str(input('Dados",
"in 'MmFf': sexo = str(input('Dados invalidos Por favor digite novamente : ')).strip().upper()[0] print('Sexo",
"not in 'MmFf': sexo = str(input('Dados invalidos Por favor digite novamente : ')).strip().upper()[0]",
"sexo : ')).strip().upper()[0] while sexo not in 'MmFf': sexo = str(input('Dados invalidos Por",
"seu sexo : ')).strip().upper()[0] while sexo not in 'MmFf': sexo = str(input('Dados invalidos"
] |
[
"import TransferFunction import numpy as np class Linear(TransferFunction): def getTransferFunction(x): return x def",
"from Chapter5.TransferFunction import TransferFunction import numpy as np class Linear(TransferFunction): def getTransferFunction(x): return",
"numpy as np class Linear(TransferFunction): def getTransferFunction(x): return x def getTransferFunctionDerivative(x): return np.ones(len(x))",
"TransferFunction import numpy as np class Linear(TransferFunction): def getTransferFunction(x): return x def getTransferFunctionDerivative(x):",
"import numpy as np class Linear(TransferFunction): def getTransferFunction(x): return x def getTransferFunctionDerivative(x): return",
"Chapter5.TransferFunction import TransferFunction import numpy as np class Linear(TransferFunction): def getTransferFunction(x): return x"
] |
[
"pylint: disable=unused-wildcard-import from .reader import HeifFile, UndecodedHeifFile, check, read, open # pylint: disable=redefined-builtin,unused-import",
"import write # pylint: disable=unused-import from .error import HeifError # pylint: disable=unused-import from",
"HeifError # pylint: disable=unused-import from .as_opener import register_heif_opener, check_heif_magic # pylint: disable=unused-import from",
"from . import _libheif # pylint: disable=import-self __version__ = \"0.1.4\" def libheif_version(): return",
".reader import HeifFile, UndecodedHeifFile, check, read, open # pylint: disable=redefined-builtin,unused-import from .writer import",
"check, read, open # pylint: disable=redefined-builtin,unused-import from .writer import write # pylint: disable=unused-import",
"<gh_stars>10-100 from .constants import * # pylint: disable=unused-wildcard-import from .reader import HeifFile, UndecodedHeifFile,",
"# pylint: disable=unused-wildcard-import from .reader import HeifFile, UndecodedHeifFile, check, read, open # pylint:",
"from .as_opener import register_heif_opener, check_heif_magic # pylint: disable=unused-import from . import _libheif #",
"disable=unused-import from .as_opener import register_heif_opener, check_heif_magic # pylint: disable=unused-import from . import _libheif",
"disable=redefined-builtin,unused-import from .writer import write # pylint: disable=unused-import from .error import HeifError #",
"import register_heif_opener, check_heif_magic # pylint: disable=unused-import from . import _libheif # pylint: disable=import-self",
"# pylint: disable=unused-import from .error import HeifError # pylint: disable=unused-import from .as_opener import",
".as_opener import register_heif_opener, check_heif_magic # pylint: disable=unused-import from . import _libheif # pylint:",
"UndecodedHeifFile, check, read, open # pylint: disable=redefined-builtin,unused-import from .writer import write # pylint:",
"open # pylint: disable=redefined-builtin,unused-import from .writer import write # pylint: disable=unused-import from .error",
"HeifFile, UndecodedHeifFile, check, read, open # pylint: disable=redefined-builtin,unused-import from .writer import write #",
"pylint: disable=redefined-builtin,unused-import from .writer import write # pylint: disable=unused-import from .error import HeifError",
".constants import * # pylint: disable=unused-wildcard-import from .reader import HeifFile, UndecodedHeifFile, check, read,",
"disable=unused-import from .error import HeifError # pylint: disable=unused-import from .as_opener import register_heif_opener, check_heif_magic",
"disable=unused-wildcard-import from .reader import HeifFile, UndecodedHeifFile, check, read, open # pylint: disable=redefined-builtin,unused-import from",
"from .error import HeifError # pylint: disable=unused-import from .as_opener import register_heif_opener, check_heif_magic #",
"* # pylint: disable=unused-wildcard-import from .reader import HeifFile, UndecodedHeifFile, check, read, open #",
".error import HeifError # pylint: disable=unused-import from .as_opener import register_heif_opener, check_heif_magic # pylint:",
"import HeifError # pylint: disable=unused-import from .as_opener import register_heif_opener, check_heif_magic # pylint: disable=unused-import",
"# pylint: disable=unused-import from . import _libheif # pylint: disable=import-self __version__ = \"0.1.4\"",
"write # pylint: disable=unused-import from .error import HeifError # pylint: disable=unused-import from .as_opener",
"pylint: disable=unused-import from .error import HeifError # pylint: disable=unused-import from .as_opener import register_heif_opener,",
"read, open # pylint: disable=redefined-builtin,unused-import from .writer import write # pylint: disable=unused-import from",
"from .reader import HeifFile, UndecodedHeifFile, check, read, open # pylint: disable=redefined-builtin,unused-import from .writer",
"register_heif_opener, check_heif_magic # pylint: disable=unused-import from . import _libheif # pylint: disable=import-self __version__",
"pylint: disable=unused-import from .as_opener import register_heif_opener, check_heif_magic # pylint: disable=unused-import from . import",
"# pylint: disable=unused-import from .as_opener import register_heif_opener, check_heif_magic # pylint: disable=unused-import from .",
".writer import write # pylint: disable=unused-import from .error import HeifError # pylint: disable=unused-import",
"import * # pylint: disable=unused-wildcard-import from .reader import HeifFile, UndecodedHeifFile, check, read, open",
"disable=unused-import from . import _libheif # pylint: disable=import-self __version__ = \"0.1.4\" def libheif_version():",
"pylint: disable=unused-import from . import _libheif # pylint: disable=import-self __version__ = \"0.1.4\" def",
"check_heif_magic # pylint: disable=unused-import from . import _libheif # pylint: disable=import-self __version__ =",
"# pylint: disable=redefined-builtin,unused-import from .writer import write # pylint: disable=unused-import from .error import",
"from .writer import write # pylint: disable=unused-import from .error import HeifError # pylint:",
". import _libheif # pylint: disable=import-self __version__ = \"0.1.4\" def libheif_version(): return _libheif.ffi.string(_libheif.lib.heif_get_version()).decode()",
"import HeifFile, UndecodedHeifFile, check, read, open # pylint: disable=redefined-builtin,unused-import from .writer import write",
"from .constants import * # pylint: disable=unused-wildcard-import from .reader import HeifFile, UndecodedHeifFile, check,"
] |
[
"(stellarwave<5.5)) stellarwave = stellarwave[relevant] stellarspec = stellarspec[relevant] StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic') planetwave,",
"float(f.readline().split('=')[1]) params.rp = float(f.readline().split('=')[1]) params.a = float(f.readline().split('=')[1]) params.w = float(f.readline().split('=')[1]) params.ecc = float(f.readline().split('=')[1])",
"= float(f.readline().split('=')[1]) limbdark = f.readline().split('=')[1] # ugh u1 = float(limbdark.split(',')[0][2:]) u2 = float(limbdark.split(',')[1][1:-2])",
"from scipy import interpolate file = numpy.load('GJ436b_Trans_SED.npz') SEDarray = file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray) plt.show()",
"that we can vstack onto this wave = numpy.linspace(1.75,5.25,3500) for waveval in wave:",
"# trim that initial row with all zeroes numpy.savez('GJ436b_Trans_SED', SEDarray=SEDarray, time=time, wave=wave) plt.imshow(SEDarray)",
"import matplotlib.pyplot as plt from scipy import interpolate file = numpy.load('GJ436b_Trans_SED.npz') SEDarray =",
"transitmodel = batman.TransitModel(params, time) # creates a transit model object using the time",
"u1 = float(limbdark.split(',')[0][2:]) u2 = float(limbdark.split(',')[1][1:-2]) params.u = [u1, u2] params.limb_dark = \"quadratic\"",
"kind='cubic') time = numpy.linspace(0.0,0.1,5000) f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params = batman.TransitParams params.t0 =",
"SEDarray = numpy.vstack((SEDarray, actualflux)) SEDarray = numpy.delete(SEDarray, 0, 0) # trim that initial",
"file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray) plt.show() stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800) stellarwave /= 10000.",
"spec is in depth, but batman wants rp/rs fluxtransit = transitmodel.light_curve(params) actualflux =",
"limbdark = f.readline().split('=')[1] # ugh u1 = float(limbdark.split(',')[0][2:]) u2 = float(limbdark.split(',')[1][1:-2]) params.u =",
"'r') params = batman.TransitParams params.t0 = float(f.readline().split('=')[1]) # hardcoded readlines b/c the file",
"I'm using has a fixed format params.per = float(f.readline().split('=')[1]) params.inc = float(f.readline().split('=')[1]) params.rp",
"= float(f.readline().split('=')[1]) params.w = float(f.readline().split('=')[1]) params.ecc = float(f.readline().split('=')[1]) params.fp = float(f.readline().split('=')[1]) params.t_secondary =",
"time) # creates a transit model object using the time array; we can",
"batman import matplotlib.pyplot as plt from scipy import interpolate file = numpy.load('GJ436b_Trans_SED.npz') SEDarray",
"f.readline().split('=')[1] # ugh u1 = float(limbdark.split(',')[0][2:]) u2 = float(limbdark.split(',')[1][1:-2]) params.u = [u1, u2]",
"file I'm using has a fixed format params.per = float(f.readline().split('=')[1]) params.inc = float(f.readline().split('=')[1])",
"rp/rs fluxtransit = transitmodel.light_curve(params) actualflux = fluxtransit * StellarInterp(waveval) SEDarray = numpy.vstack((SEDarray, actualflux))",
"params.fp = float(f.readline().split('=')[1]) params.t_secondary = float(f.readline().split('=')[1]) limbdark = f.readline().split('=')[1] # ugh u1 =",
"numpy, sys, math, batman import matplotlib.pyplot as plt from scipy import interpolate file",
"0) # trim that initial row with all zeroes numpy.savez('GJ436b_Trans_SED', SEDarray=SEDarray, time=time, wave=wave)",
"u2] params.limb_dark = \"quadratic\" transitmodel = batman.TransitModel(params, time) # creates a transit model",
"um relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5)) stellarwave = stellarwave[relevant] stellarspec = stellarspec[relevant] StellarInterp",
"= float(limbdark.split(',')[0][2:]) u2 = float(limbdark.split(',')[1][1:-2]) params.u = [u1, u2] params.limb_dark = \"quadratic\" transitmodel",
"in depth, but batman wants rp/rs fluxtransit = transitmodel.light_curve(params) actualflux = fluxtransit *",
"PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic') time = numpy.linspace(0.0,0.1,5000) f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params",
"array; we can change the depth now by changing what's in params SEDarray",
"params.inc = float(f.readline().split('=')[1]) params.rp = float(f.readline().split('=')[1]) params.a = float(f.readline().split('=')[1]) params.w = float(f.readline().split('=')[1]) params.ecc",
"as plt from scipy import interpolate file = numpy.load('GJ436b_Trans_SED.npz') SEDarray = file['SEDarray'] print(SEDarray.shape)",
"has a fixed format params.per = float(f.readline().split('=')[1]) params.inc = float(f.readline().split('=')[1]) params.rp = float(f.readline().split('=')[1])",
"= float(f.readline().split('=')[1]) params.rp = float(f.readline().split('=')[1]) params.a = float(f.readline().split('=')[1]) params.w = float(f.readline().split('=')[1]) params.ecc =",
"# to um relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5)) stellarwave = stellarwave[relevant] stellarspec =",
"= interpolate.interp1d(stellarwave, stellarspec, kind='cubic') planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp = interpolate.interp1d(planetwave, planetspec,",
"= numpy.zeros(time.shape[0]) # initialize so that we can vstack onto this wave =",
"= batman.TransitParams params.t0 = float(f.readline().split('=')[1]) # hardcoded readlines b/c the file I'm using",
"numpy.load('GJ436b_Trans_SED.npz') SEDarray = file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray) plt.show() stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800)",
"params = batman.TransitParams params.t0 = float(f.readline().split('=')[1]) # hardcoded readlines b/c the file I'm",
"= file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray) plt.show() stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800) stellarwave /=",
"relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5)) stellarwave = stellarwave[relevant] stellarspec = stellarspec[relevant] StellarInterp =",
"numpy.where((stellarwave>1.5) & (stellarwave<5.5)) stellarwave = stellarwave[relevant] stellarspec = stellarspec[relevant] StellarInterp = interpolate.interp1d(stellarwave, stellarspec,",
"using the time array; we can change the depth now by changing what's",
"float(f.readline().split('=')[1]) params.ecc = float(f.readline().split('=')[1]) params.fp = float(f.readline().split('=')[1]) params.t_secondary = float(f.readline().split('=')[1]) limbdark = f.readline().split('=')[1]",
"trim that initial row with all zeroes numpy.savez('GJ436b_Trans_SED', SEDarray=SEDarray, time=time, wave=wave) plt.imshow(SEDarray) plt.show()",
"= fluxtransit * StellarInterp(waveval) SEDarray = numpy.vstack((SEDarray, actualflux)) SEDarray = numpy.delete(SEDarray, 0, 0)",
"time array; we can change the depth now by changing what's in params",
"planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic') time = numpy.linspace(0.0,0.1,5000) f",
"ugh u1 = float(limbdark.split(',')[0][2:]) u2 = float(limbdark.split(',')[1][1:-2]) params.u = [u1, u2] params.limb_dark =",
"# sqrt b/c trans. spec is in depth, but batman wants rp/rs fluxtransit",
"object using the time array; we can change the depth now by changing",
"= float(f.readline().split('=')[1]) # hardcoded readlines b/c the file I'm using has a fixed",
"fixed format params.per = float(f.readline().split('=')[1]) params.inc = float(f.readline().split('=')[1]) params.rp = float(f.readline().split('=')[1]) params.a =",
"batman.TransitModel(params, time) # creates a transit model object using the time array; we",
"import interpolate file = numpy.load('GJ436b_Trans_SED.npz') SEDarray = file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray) plt.show() stellarwave, stellarspec",
"plt.imshow(SEDarray) plt.show() stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800) stellarwave /= 10000. # to",
"stellarwave /= 10000. # to um relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5)) stellarwave =",
"for waveval in wave: params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans. spec is",
"hardcoded readlines b/c the file I'm using has a fixed format params.per =",
"b/c the file I'm using has a fixed format params.per = float(f.readline().split('=')[1]) params.inc",
"a fixed format params.per = float(f.readline().split('=')[1]) params.inc = float(f.readline().split('=')[1]) params.rp = float(f.readline().split('=')[1]) params.a",
"changing what's in params SEDarray = numpy.zeros(time.shape[0]) # initialize so that we can",
"params.u = [u1, u2] params.limb_dark = \"quadratic\" transitmodel = batman.TransitModel(params, time) # creates",
"= interpolate.interp1d(planetwave, planetspec, kind='cubic') time = numpy.linspace(0.0,0.1,5000) f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params =",
"numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic') time = numpy.linspace(0.0,0.1,5000) f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt',",
"stellarspec, kind='cubic') planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic') time",
"now by changing what's in params SEDarray = numpy.zeros(time.shape[0]) # initialize so that",
"plt.show() stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800) stellarwave /= 10000. # to um",
"this wave = numpy.linspace(1.75,5.25,3500) for waveval in wave: params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt",
"we can change the depth now by changing what's in params SEDarray =",
"open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params = batman.TransitParams params.t0 = float(f.readline().split('=')[1]) # hardcoded readlines b/c the",
"= float(f.readline().split('=')[1]) params.ecc = float(f.readline().split('=')[1]) params.fp = float(f.readline().split('=')[1]) params.t_secondary = float(f.readline().split('=')[1]) limbdark =",
"time = numpy.linspace(0.0,0.1,5000) f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params = batman.TransitParams params.t0 = float(f.readline().split('=')[1])",
"u2 = float(limbdark.split(',')[1][1:-2]) params.u = [u1, u2] params.limb_dark = \"quadratic\" transitmodel = batman.TransitModel(params,",
"= batman.TransitModel(params, time) # creates a transit model object using the time array;",
"kind='cubic') planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic') time =",
"transit model object using the time array; we can change the depth now",
"vstack onto this wave = numpy.linspace(1.75,5.25,3500) for waveval in wave: params.rp = math.sqrt(PlanetInterp(waveval))",
"= numpy.vstack((SEDarray, actualflux)) SEDarray = numpy.delete(SEDarray, 0, 0) # trim that initial row",
"params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans. spec is in depth, but batman",
"wave: params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans. spec is in depth, but",
"actualflux)) SEDarray = numpy.delete(SEDarray, 0, 0) # trim that initial row with all",
"import numpy, sys, math, batman import matplotlib.pyplot as plt from scipy import interpolate",
"fluxtransit * StellarInterp(waveval) SEDarray = numpy.vstack((SEDarray, actualflux)) SEDarray = numpy.delete(SEDarray, 0, 0) #",
"print(SEDarray.shape) plt.imshow(SEDarray) plt.show() stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800) stellarwave /= 10000. #",
"= numpy.load('GJ436b_Trans_SED.npz') SEDarray = file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray) plt.show() stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True,",
"float(limbdark.split(',')[1][1:-2]) params.u = [u1, u2] params.limb_dark = \"quadratic\" transitmodel = batman.TransitModel(params, time) #",
"format params.per = float(f.readline().split('=')[1]) params.inc = float(f.readline().split('=')[1]) params.rp = float(f.readline().split('=')[1]) params.a = float(f.readline().split('=')[1])",
"in params SEDarray = numpy.zeros(time.shape[0]) # initialize so that we can vstack onto",
"SEDarray = numpy.zeros(time.shape[0]) # initialize so that we can vstack onto this wave",
"readlines b/c the file I'm using has a fixed format params.per = float(f.readline().split('=')[1])",
"stellarwave[relevant] stellarspec = stellarspec[relevant] StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic') planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt',",
"= numpy.delete(SEDarray, 0, 0) # trim that initial row with all zeroes numpy.savez('GJ436b_Trans_SED',",
"to um relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5)) stellarwave = stellarwave[relevant] stellarspec = stellarspec[relevant]",
"= \"quadratic\" transitmodel = batman.TransitModel(params, time) # creates a transit model object using",
"SEDarray = file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray) plt.show() stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800) stellarwave",
"plt from scipy import interpolate file = numpy.load('GJ436b_Trans_SED.npz') SEDarray = file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray)",
"# hardcoded readlines b/c the file I'm using has a fixed format params.per",
"params.w = float(f.readline().split('=')[1]) params.ecc = float(f.readline().split('=')[1]) params.fp = float(f.readline().split('=')[1]) params.t_secondary = float(f.readline().split('=')[1]) limbdark",
"params.t_secondary = float(f.readline().split('=')[1]) limbdark = f.readline().split('=')[1] # ugh u1 = float(limbdark.split(',')[0][2:]) u2 =",
"skiprows=800) stellarwave /= 10000. # to um relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5)) stellarwave",
"float(limbdark.split(',')[0][2:]) u2 = float(limbdark.split(',')[1][1:-2]) params.u = [u1, u2] params.limb_dark = \"quadratic\" transitmodel =",
"= transitmodel.light_curve(params) actualflux = fluxtransit * StellarInterp(waveval) SEDarray = numpy.vstack((SEDarray, actualflux)) SEDarray =",
"SEDarray = numpy.delete(SEDarray, 0, 0) # trim that initial row with all zeroes",
"float(f.readline().split('=')[1]) params.fp = float(f.readline().split('=')[1]) params.t_secondary = float(f.readline().split('=')[1]) limbdark = f.readline().split('=')[1] # ugh u1",
"planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic') time = numpy.linspace(0.0,0.1,5000)",
"stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800) stellarwave /= 10000. # to um relevant =",
"stellarspec = stellarspec[relevant] StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic') planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True)",
"params.rp = float(f.readline().split('=')[1]) params.a = float(f.readline().split('=')[1]) params.w = float(f.readline().split('=')[1]) params.ecc = float(f.readline().split('=')[1]) params.fp",
"so that we can vstack onto this wave = numpy.linspace(1.75,5.25,3500) for waveval in",
"* StellarInterp(waveval) SEDarray = numpy.vstack((SEDarray, actualflux)) SEDarray = numpy.delete(SEDarray, 0, 0) # trim",
"\"quadratic\" transitmodel = batman.TransitModel(params, time) # creates a transit model object using the",
"= numpy.where((stellarwave>1.5) & (stellarwave<5.5)) stellarwave = stellarwave[relevant] stellarspec = stellarspec[relevant] StellarInterp = interpolate.interp1d(stellarwave,",
"float(f.readline().split('=')[1]) params.w = float(f.readline().split('=')[1]) params.ecc = float(f.readline().split('=')[1]) params.fp = float(f.readline().split('=')[1]) params.t_secondary = float(f.readline().split('=')[1])",
"& (stellarwave<5.5)) stellarwave = stellarwave[relevant] stellarspec = stellarspec[relevant] StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic')",
"StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic') planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp = interpolate.interp1d(planetwave,",
"actualflux = fluxtransit * StellarInterp(waveval) SEDarray = numpy.vstack((SEDarray, actualflux)) SEDarray = numpy.delete(SEDarray, 0,",
"stellarwave = stellarwave[relevant] stellarspec = stellarspec[relevant] StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic') planetwave, planetspec",
"= float(f.readline().split('=')[1]) params.t_secondary = float(f.readline().split('=')[1]) limbdark = f.readline().split('=')[1] # ugh u1 = float(limbdark.split(',')[0][2:])",
"= open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params = batman.TransitParams params.t0 = float(f.readline().split('=')[1]) # hardcoded readlines b/c",
"planetspec, kind='cubic') time = numpy.linspace(0.0,0.1,5000) f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params = batman.TransitParams params.t0",
"params.per = float(f.readline().split('=')[1]) params.inc = float(f.readline().split('=')[1]) params.rp = float(f.readline().split('=')[1]) params.a = float(f.readline().split('=')[1]) params.w",
"# initialize so that we can vstack onto this wave = numpy.linspace(1.75,5.25,3500) for",
"can change the depth now by changing what's in params SEDarray = numpy.zeros(time.shape[0])",
"the depth now by changing what's in params SEDarray = numpy.zeros(time.shape[0]) # initialize",
"math, batman import matplotlib.pyplot as plt from scipy import interpolate file = numpy.load('GJ436b_Trans_SED.npz')",
"change the depth now by changing what's in params SEDarray = numpy.zeros(time.shape[0]) #",
"params.limb_dark = \"quadratic\" transitmodel = batman.TransitModel(params, time) # creates a transit model object",
"float(f.readline().split('=')[1]) limbdark = f.readline().split('=')[1] # ugh u1 = float(limbdark.split(',')[0][2:]) u2 = float(limbdark.split(',')[1][1:-2]) params.u",
"# creates a transit model object using the time array; we can change",
"trans. spec is in depth, but batman wants rp/rs fluxtransit = transitmodel.light_curve(params) actualflux",
"= [u1, u2] params.limb_dark = \"quadratic\" transitmodel = batman.TransitModel(params, time) # creates a",
"10000. # to um relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5)) stellarwave = stellarwave[relevant] stellarspec",
"we can vstack onto this wave = numpy.linspace(1.75,5.25,3500) for waveval in wave: params.rp",
"wave = numpy.linspace(1.75,5.25,3500) for waveval in wave: params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt b/c",
"numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800) stellarwave /= 10000. # to um relevant = numpy.where((stellarwave>1.5) &",
"0, 0) # trim that initial row with all zeroes numpy.savez('GJ436b_Trans_SED', SEDarray=SEDarray, time=time,",
"float(f.readline().split('=')[1]) params.a = float(f.readline().split('=')[1]) params.w = float(f.readline().split('=')[1]) params.ecc = float(f.readline().split('=')[1]) params.fp = float(f.readline().split('=')[1])",
"numpy.delete(SEDarray, 0, 0) # trim that initial row with all zeroes numpy.savez('GJ436b_Trans_SED', SEDarray=SEDarray,",
"= float(f.readline().split('=')[1]) params.inc = float(f.readline().split('=')[1]) params.rp = float(f.readline().split('=')[1]) params.a = float(f.readline().split('=')[1]) params.w =",
"stellarspec[relevant] StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic') planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp =",
"params.t0 = float(f.readline().split('=')[1]) # hardcoded readlines b/c the file I'm using has a",
"the file I'm using has a fixed format params.per = float(f.readline().split('=')[1]) params.inc =",
"depth, but batman wants rp/rs fluxtransit = transitmodel.light_curve(params) actualflux = fluxtransit * StellarInterp(waveval)",
"interpolate file = numpy.load('GJ436b_Trans_SED.npz') SEDarray = file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray) plt.show() stellarwave, stellarspec =",
"unpack=True, skiprows=800) stellarwave /= 10000. # to um relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5))",
"batman.TransitParams params.t0 = float(f.readline().split('=')[1]) # hardcoded readlines b/c the file I'm using has",
"interpolate.interp1d(planetwave, planetspec, kind='cubic') time = numpy.linspace(0.0,0.1,5000) f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params = batman.TransitParams",
"= numpy.linspace(1.75,5.25,3500) for waveval in wave: params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans.",
"numpy.vstack((SEDarray, actualflux)) SEDarray = numpy.delete(SEDarray, 0, 0) # trim that initial row with",
"scipy import interpolate file = numpy.load('GJ436b_Trans_SED.npz') SEDarray = file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray) plt.show() stellarwave,",
"= f.readline().split('=')[1] # ugh u1 = float(limbdark.split(',')[0][2:]) u2 = float(limbdark.split(',')[1][1:-2]) params.u = [u1,",
"fluxtransit = transitmodel.light_curve(params) actualflux = fluxtransit * StellarInterp(waveval) SEDarray = numpy.vstack((SEDarray, actualflux)) SEDarray",
"the time array; we can change the depth now by changing what's in",
"batman wants rp/rs fluxtransit = transitmodel.light_curve(params) actualflux = fluxtransit * StellarInterp(waveval) SEDarray =",
"/= 10000. # to um relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5)) stellarwave = stellarwave[relevant]",
"= math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans. spec is in depth, but batman wants",
"using has a fixed format params.per = float(f.readline().split('=')[1]) params.inc = float(f.readline().split('=')[1]) params.rp =",
"is in depth, but batman wants rp/rs fluxtransit = transitmodel.light_curve(params) actualflux = fluxtransit",
"onto this wave = numpy.linspace(1.75,5.25,3500) for waveval in wave: params.rp = math.sqrt(PlanetInterp(waveval)) #",
"[u1, u2] params.limb_dark = \"quadratic\" transitmodel = batman.TransitModel(params, time) # creates a transit",
"what's in params SEDarray = numpy.zeros(time.shape[0]) # initialize so that we can vstack",
"wants rp/rs fluxtransit = transitmodel.light_curve(params) actualflux = fluxtransit * StellarInterp(waveval) SEDarray = numpy.vstack((SEDarray,",
"float(f.readline().split('=')[1]) params.inc = float(f.readline().split('=')[1]) params.rp = float(f.readline().split('=')[1]) params.a = float(f.readline().split('=')[1]) params.w = float(f.readline().split('=')[1])",
"f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params = batman.TransitParams params.t0 = float(f.readline().split('=')[1]) # hardcoded readlines",
"float(f.readline().split('=')[1]) # hardcoded readlines b/c the file I'm using has a fixed format",
"a transit model object using the time array; we can change the depth",
"model object using the time array; we can change the depth now by",
"math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans. spec is in depth, but batman wants rp/rs",
"# ugh u1 = float(limbdark.split(',')[0][2:]) u2 = float(limbdark.split(',')[1][1:-2]) params.u = [u1, u2] params.limb_dark",
"unpack=True) PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic') time = numpy.linspace(0.0,0.1,5000) f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r')",
"initialize so that we can vstack onto this wave = numpy.linspace(1.75,5.25,3500) for waveval",
"numpy.linspace(1.75,5.25,3500) for waveval in wave: params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans. spec",
"but batman wants rp/rs fluxtransit = transitmodel.light_curve(params) actualflux = fluxtransit * StellarInterp(waveval) SEDarray",
"= stellarwave[relevant] stellarspec = stellarspec[relevant] StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic') planetwave, planetspec =",
"params.a = float(f.readline().split('=')[1]) params.w = float(f.readline().split('=')[1]) params.ecc = float(f.readline().split('=')[1]) params.fp = float(f.readline().split('=')[1]) params.t_secondary",
"= float(f.readline().split('=')[1]) params.a = float(f.readline().split('=')[1]) params.w = float(f.readline().split('=')[1]) params.ecc = float(f.readline().split('=')[1]) params.fp =",
"numpy.linspace(0.0,0.1,5000) f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params = batman.TransitParams params.t0 = float(f.readline().split('=')[1]) # hardcoded",
"StellarInterp(waveval) SEDarray = numpy.vstack((SEDarray, actualflux)) SEDarray = numpy.delete(SEDarray, 0, 0) # trim that",
"= numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic') time = numpy.linspace(0.0,0.1,5000) f =",
"matplotlib.pyplot as plt from scipy import interpolate file = numpy.load('GJ436b_Trans_SED.npz') SEDarray = file['SEDarray']",
"= float(f.readline().split('=')[1]) params.fp = float(f.readline().split('=')[1]) params.t_secondary = float(f.readline().split('=')[1]) limbdark = f.readline().split('=')[1] # ugh",
"= numpy.linspace(0.0,0.1,5000) f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params = batman.TransitParams params.t0 = float(f.readline().split('=')[1]) #",
"b/c trans. spec is in depth, but batman wants rp/rs fluxtransit = transitmodel.light_curve(params)",
"params.ecc = float(f.readline().split('=')[1]) params.fp = float(f.readline().split('=')[1]) params.t_secondary = float(f.readline().split('=')[1]) limbdark = f.readline().split('=')[1] #",
"float(f.readline().split('=')[1]) params.t_secondary = float(f.readline().split('=')[1]) limbdark = f.readline().split('=')[1] # ugh u1 = float(limbdark.split(',')[0][2:]) u2",
"= float(limbdark.split(',')[1][1:-2]) params.u = [u1, u2] params.limb_dark = \"quadratic\" transitmodel = batman.TransitModel(params, time)",
"waveval in wave: params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans. spec is in",
"= stellarspec[relevant] StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic') planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp",
"can vstack onto this wave = numpy.linspace(1.75,5.25,3500) for waveval in wave: params.rp =",
"= numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800) stellarwave /= 10000. # to um relevant = numpy.where((stellarwave>1.5)",
"sqrt b/c trans. spec is in depth, but batman wants rp/rs fluxtransit =",
"numpy.zeros(time.shape[0]) # initialize so that we can vstack onto this wave = numpy.linspace(1.75,5.25,3500)",
"by changing what's in params SEDarray = numpy.zeros(time.shape[0]) # initialize so that we",
"depth now by changing what's in params SEDarray = numpy.zeros(time.shape[0]) # initialize so",
"interpolate.interp1d(stellarwave, stellarspec, kind='cubic') planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic')",
"in wave: params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans. spec is in depth,",
"creates a transit model object using the time array; we can change the",
"transitmodel.light_curve(params) actualflux = fluxtransit * StellarInterp(waveval) SEDarray = numpy.vstack((SEDarray, actualflux)) SEDarray = numpy.delete(SEDarray,",
"params SEDarray = numpy.zeros(time.shape[0]) # initialize so that we can vstack onto this",
"stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800) stellarwave /= 10000. # to um relevant",
"file = numpy.load('GJ436b_Trans_SED.npz') SEDarray = file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray) plt.show() stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec',",
"sys, math, batman import matplotlib.pyplot as plt from scipy import interpolate file ="
] |
[
"-*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models",
"from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('wagtailmenus', '0009_auto_20160201_0859'), ]",
"models class Migration(migrations.Migration): dependencies = [ ('wagtailmenus', '0009_auto_20160201_0859'), ] operations = [ migrations.RenameField(",
"dependencies = [ ('wagtailmenus', '0009_auto_20160201_0859'), ] operations = [ migrations.RenameField( model_name='mainmenuitem', old_name='add_subnav', new_name='allow_subnav',",
"-*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies",
"import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('wagtailmenus',",
"<reponame>pierremanceaux/wagtailmenus # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import",
"= [ ('wagtailmenus', '0009_auto_20160201_0859'), ] operations = [ migrations.RenameField( model_name='mainmenuitem', old_name='add_subnav', new_name='allow_subnav', ),",
"[ ('wagtailmenus', '0009_auto_20160201_0859'), ] operations = [ migrations.RenameField( model_name='mainmenuitem', old_name='add_subnav', new_name='allow_subnav', ), ]",
"Migration(migrations.Migration): dependencies = [ ('wagtailmenus', '0009_auto_20160201_0859'), ] operations = [ migrations.RenameField( model_name='mainmenuitem', old_name='add_subnav',",
"utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration):",
"import migrations, models class Migration(migrations.Migration): dependencies = [ ('wagtailmenus', '0009_auto_20160201_0859'), ] operations =",
"migrations, models class Migration(migrations.Migration): dependencies = [ ('wagtailmenus', '0009_auto_20160201_0859'), ] operations = [",
"# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations,",
"from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies =",
"__future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [",
"django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('wagtailmenus', '0009_auto_20160201_0859'), ] operations",
"unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('wagtailmenus', '0009_auto_20160201_0859'),",
"coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class",
"class Migration(migrations.Migration): dependencies = [ ('wagtailmenus', '0009_auto_20160201_0859'), ] operations = [ migrations.RenameField( model_name='mainmenuitem',"
] |
[
"import sys def eprint(*args, **kwargs): \"\"\"Print to stderr.\"\"\" print(*args, file=sys.stderr, **kwargs) def bail(error:",
"sys def eprint(*args, **kwargs): \"\"\"Print to stderr.\"\"\" print(*args, file=sys.stderr, **kwargs) def bail(error: str):",
"str): \"\"\"Print message to stderr and exit with an error code.\"\"\" eprint(error) sys.exit(1)",
"<reponame>JanLikar/envrun import sys def eprint(*args, **kwargs): \"\"\"Print to stderr.\"\"\" print(*args, file=sys.stderr, **kwargs) def",
"bail(error: str): \"\"\"Print message to stderr and exit with an error code.\"\"\" eprint(error)",
"eprint(*args, **kwargs): \"\"\"Print to stderr.\"\"\" print(*args, file=sys.stderr, **kwargs) def bail(error: str): \"\"\"Print message",
"def eprint(*args, **kwargs): \"\"\"Print to stderr.\"\"\" print(*args, file=sys.stderr, **kwargs) def bail(error: str): \"\"\"Print",
"**kwargs): \"\"\"Print to stderr.\"\"\" print(*args, file=sys.stderr, **kwargs) def bail(error: str): \"\"\"Print message to",
"def bail(error: str): \"\"\"Print message to stderr and exit with an error code.\"\"\"",
"file=sys.stderr, **kwargs) def bail(error: str): \"\"\"Print message to stderr and exit with an",
"to stderr.\"\"\" print(*args, file=sys.stderr, **kwargs) def bail(error: str): \"\"\"Print message to stderr and",
"print(*args, file=sys.stderr, **kwargs) def bail(error: str): \"\"\"Print message to stderr and exit with",
"**kwargs) def bail(error: str): \"\"\"Print message to stderr and exit with an error",
"stderr.\"\"\" print(*args, file=sys.stderr, **kwargs) def bail(error: str): \"\"\"Print message to stderr and exit",
"\"\"\"Print to stderr.\"\"\" print(*args, file=sys.stderr, **kwargs) def bail(error: str): \"\"\"Print message to stderr"
] |
[
"delete, query ) def test_numbers_are_not_changed(db): \"\"\"Asserts that numbers inserted into dynamo are not",
"to another type - **namely Decimals**. See https://github.com/boto/boto3/issues/369 This test is to assert",
"parsable property. \"\"\" db(create( table_name='data_const', hash_key='id')) db(add(table_name='data_const', item={ 'id': 'aaaaaa', 'prefrences': { 'volume':",
"import partial from boto3 import client from test.integration.fixtures import db from dynofunc import",
"hash_key='id')) db(add(table_name='data_const', item={ 'id': 'aaaaaa', 'prefrences': { 'volume': 9 } })) user =",
"db(find(table_name='data_const', key={ 'id': 'aaaaaa' })) volume = user.item().get('prefrences').get('volume') assert volume == 9 assert",
") def test_numbers_are_not_changed(db): \"\"\"Asserts that numbers inserted into dynamo are not converted to",
"dynamo returns back to a default json parsable property. \"\"\" db(create( table_name='data_const', hash_key='id'))",
"by converting any Decimals that dynamo returns back to a default json parsable",
"type - **namely Decimals**. See https://github.com/boto/boto3/issues/369 This test is to assert that dynofunc",
"are not converted to another type - **namely Decimals**. See https://github.com/boto/boto3/issues/369 This test",
"from test.integration.fixtures import db from dynofunc import ( create, find, add, update, delete,",
"user = db(find(table_name='data_const', key={ 'id': 'aaaaaa' })) volume = user.item().get('prefrences').get('volume') assert volume ==",
"db(add(table_name='data_const', item={ 'id': 'aaaaaa', 'prefrences': { 'volume': 9 } })) user = db(find(table_name='data_const',",
"'prefrences': { 'volume': 9 } })) user = db(find(table_name='data_const', key={ 'id': 'aaaaaa' }))",
"pytest from functools import partial from boto3 import client from test.integration.fixtures import db",
"not converted to another type - **namely Decimals**. See https://github.com/boto/boto3/issues/369 This test is",
"- **namely Decimals**. See https://github.com/boto/boto3/issues/369 This test is to assert that dynofunc correctly",
"import ( create, find, add, update, delete, query ) def test_numbers_are_not_changed(db): \"\"\"Asserts that",
"dynofunc import ( create, find, add, update, delete, query ) def test_numbers_are_not_changed(db): \"\"\"Asserts",
"this by converting any Decimals that dynamo returns back to a default json",
"converting any Decimals that dynamo returns back to a default json parsable property.",
"9 } })) user = db(find(table_name='data_const', key={ 'id': 'aaaaaa' })) volume = user.item().get('prefrences').get('volume')",
"**namely Decimals**. See https://github.com/boto/boto3/issues/369 This test is to assert that dynofunc correctly handles",
"query ) def test_numbers_are_not_changed(db): \"\"\"Asserts that numbers inserted into dynamo are not converted",
"into dynamo are not converted to another type - **namely Decimals**. See https://github.com/boto/boto3/issues/369",
"that dynamo returns back to a default json parsable property. \"\"\" db(create( table_name='data_const',",
"Decimals**. See https://github.com/boto/boto3/issues/369 This test is to assert that dynofunc correctly handles this",
"} })) user = db(find(table_name='data_const', key={ 'id': 'aaaaaa' })) volume = user.item().get('prefrences').get('volume') assert",
"})) user = db(find(table_name='data_const', key={ 'id': 'aaaaaa' })) volume = user.item().get('prefrences').get('volume') assert volume",
"import client from test.integration.fixtures import db from dynofunc import ( create, find, add,",
"dynamo are not converted to another type - **namely Decimals**. See https://github.com/boto/boto3/issues/369 This",
"See https://github.com/boto/boto3/issues/369 This test is to assert that dynofunc correctly handles this by",
"add, update, delete, query ) def test_numbers_are_not_changed(db): \"\"\"Asserts that numbers inserted into dynamo",
"partial from boto3 import client from test.integration.fixtures import db from dynofunc import (",
"converted to another type - **namely Decimals**. See https://github.com/boto/boto3/issues/369 This test is to",
"'id': 'aaaaaa', 'prefrences': { 'volume': 9 } })) user = db(find(table_name='data_const', key={ 'id':",
"find, add, update, delete, query ) def test_numbers_are_not_changed(db): \"\"\"Asserts that numbers inserted into",
"{ 'volume': 9 } })) user = db(find(table_name='data_const', key={ 'id': 'aaaaaa' })) volume",
"from boto3 import client from test.integration.fixtures import db from dynofunc import ( create,",
"that numbers inserted into dynamo are not converted to another type - **namely",
"dynofunc correctly handles this by converting any Decimals that dynamo returns back to",
"import json import pytest from functools import partial from boto3 import client from",
"https://github.com/boto/boto3/issues/369 This test is to assert that dynofunc correctly handles this by converting",
"test_numbers_are_not_changed(db): \"\"\"Asserts that numbers inserted into dynamo are not converted to another type",
"import pytest from functools import partial from boto3 import client from test.integration.fixtures import",
"numbers inserted into dynamo are not converted to another type - **namely Decimals**.",
"test is to assert that dynofunc correctly handles this by converting any Decimals",
"( create, find, add, update, delete, query ) def test_numbers_are_not_changed(db): \"\"\"Asserts that numbers",
"This test is to assert that dynofunc correctly handles this by converting any",
"json import pytest from functools import partial from boto3 import client from test.integration.fixtures",
"<gh_stars>1-10 import json import pytest from functools import partial from boto3 import client",
"returns back to a default json parsable property. \"\"\" db(create( table_name='data_const', hash_key='id')) db(add(table_name='data_const',",
"item={ 'id': 'aaaaaa', 'prefrences': { 'volume': 9 } })) user = db(find(table_name='data_const', key={",
"another type - **namely Decimals**. See https://github.com/boto/boto3/issues/369 This test is to assert that",
"\"\"\" db(create( table_name='data_const', hash_key='id')) db(add(table_name='data_const', item={ 'id': 'aaaaaa', 'prefrences': { 'volume': 9 }",
"def test_numbers_are_not_changed(db): \"\"\"Asserts that numbers inserted into dynamo are not converted to another",
"a default json parsable property. \"\"\" db(create( table_name='data_const', hash_key='id')) db(add(table_name='data_const', item={ 'id': 'aaaaaa',",
"json parsable property. \"\"\" db(create( table_name='data_const', hash_key='id')) db(add(table_name='data_const', item={ 'id': 'aaaaaa', 'prefrences': {",
"'aaaaaa', 'prefrences': { 'volume': 9 } })) user = db(find(table_name='data_const', key={ 'id': 'aaaaaa'",
"inserted into dynamo are not converted to another type - **namely Decimals**. See",
"table_name='data_const', hash_key='id')) db(add(table_name='data_const', item={ 'id': 'aaaaaa', 'prefrences': { 'volume': 9 } })) user",
"to assert that dynofunc correctly handles this by converting any Decimals that dynamo",
"correctly handles this by converting any Decimals that dynamo returns back to a",
"key={ 'id': 'aaaaaa' })) volume = user.item().get('prefrences').get('volume') assert volume == 9 assert isinstance(volume,",
"from dynofunc import ( create, find, add, update, delete, query ) def test_numbers_are_not_changed(db):",
"create, find, add, update, delete, query ) def test_numbers_are_not_changed(db): \"\"\"Asserts that numbers inserted",
"'volume': 9 } })) user = db(find(table_name='data_const', key={ 'id': 'aaaaaa' })) volume =",
"default json parsable property. \"\"\" db(create( table_name='data_const', hash_key='id')) db(add(table_name='data_const', item={ 'id': 'aaaaaa', 'prefrences':",
"import db from dynofunc import ( create, find, add, update, delete, query )",
"Decimals that dynamo returns back to a default json parsable property. \"\"\" db(create(",
"'id': 'aaaaaa' })) volume = user.item().get('prefrences').get('volume') assert volume == 9 assert isinstance(volume, int)",
"is to assert that dynofunc correctly handles this by converting any Decimals that",
"to a default json parsable property. \"\"\" db(create( table_name='data_const', hash_key='id')) db(add(table_name='data_const', item={ 'id':",
"from functools import partial from boto3 import client from test.integration.fixtures import db from",
"that dynofunc correctly handles this by converting any Decimals that dynamo returns back",
"\"\"\"Asserts that numbers inserted into dynamo are not converted to another type -",
"db from dynofunc import ( create, find, add, update, delete, query ) def",
"property. \"\"\" db(create( table_name='data_const', hash_key='id')) db(add(table_name='data_const', item={ 'id': 'aaaaaa', 'prefrences': { 'volume': 9",
"back to a default json parsable property. \"\"\" db(create( table_name='data_const', hash_key='id')) db(add(table_name='data_const', item={",
"= db(find(table_name='data_const', key={ 'id': 'aaaaaa' })) volume = user.item().get('prefrences').get('volume') assert volume == 9",
"client from test.integration.fixtures import db from dynofunc import ( create, find, add, update,",
"update, delete, query ) def test_numbers_are_not_changed(db): \"\"\"Asserts that numbers inserted into dynamo are",
"any Decimals that dynamo returns back to a default json parsable property. \"\"\"",
"test.integration.fixtures import db from dynofunc import ( create, find, add, update, delete, query",
"functools import partial from boto3 import client from test.integration.fixtures import db from dynofunc",
"assert that dynofunc correctly handles this by converting any Decimals that dynamo returns",
"handles this by converting any Decimals that dynamo returns back to a default",
"db(create( table_name='data_const', hash_key='id')) db(add(table_name='data_const', item={ 'id': 'aaaaaa', 'prefrences': { 'volume': 9 } }))",
"boto3 import client from test.integration.fixtures import db from dynofunc import ( create, find,"
] |
[
"def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''): \"\"\" Sets fields in the",
"development team # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"all users if none provided. \"\"\" params = {} if contextid is not",
"KIND, either express or implied. # See the License for the specific language",
"Unless required by applicable law or agreed to in writing, software # distributed",
"= path if secure is not None: params['secure'] = secure return six.next(six.itervalues(self.zap._request(self.zap.base +",
"whether or not the user, with the given ID that belongs to the",
"with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId': userid,",
"self.zap = zap def users_list(self, contextid=None): \"\"\" Gets a list of users that",
"= {} if contextid is not None: params['contextId'] = contextid return six.next(six.itervalues(self.zap._request(self.zap.base +",
"'users/view/usersList/', params))) def get_user_by_id(self, contextid, userid): \"\"\" Gets the data of the user",
"+ 'users/view/usersList/', params))) def get_user_by_id(self, contextid, userid): \"\"\" Gets the data of the",
"name, value, path=None, secure=None, apikey=''): \"\"\" Sets the specified cookie for the user",
"__init__(self, zap): self.zap = zap def users_list(self, contextid=None): \"\"\" Gets a list of",
"apikey=''): \"\"\" Renames the user with the given ID that belongs to the",
"'apikey': apikey}))) def set_user_name(self, contextid, userid, name, apikey=''): \"\"\" Renames the user with",
"'name': name, 'value': value, 'apikey': apikey} if path is not None: params['path'] =",
"contextid}))) def get_authentication_credentials(self, contextid, userid): \"\"\" Gets the authentication credentials of the user",
"contextid, userid): \"\"\" Gets the authentication credentials of the user with given ID",
"for the user identified by the Context and User Ids, e.g. cookies and",
"verification strategy has been configured. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid, 'userId':",
"identified user, returning the authentication request and whether it appears to have succeeded.",
"as the identified user, returning the authentication request and whether it appears to",
"userid, 'apikey': apikey} if lastpollresult is not None: params['lastPollResult'] = lastpollresult if lastpolltimeinms",
"params = {} if contextid is not None: params['contextId'] = contextid return six.next(six.itervalues(self.zap._request(self.zap.base",
"Sets the specified cookie for the user identified by the Context and User",
"this file except in compliance with the License. # You may obtain a",
"the given ID, should be enabled. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid,",
"authentication request and whether it appears to have succeeded. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base +",
"\"\"\" import six class users(object): def __init__(self, zap): self.zap = zap def users_list(self,",
"None: params['authCredentialsConfigParams'] = authcredentialsconfigparams return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params))) def authenticate_as_user(self, contextid, userid,",
"'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid}))) def new_user(self, contextid, name, apikey=''): \"\"\" Creates a",
"with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name': name,",
"it appears to have succeeded. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId':",
"userid, apikey=''): \"\"\" Tries to poll as the identified user, returning the authentication",
"ANY KIND, either express or implied. # See the License for the specific",
"Gets the authentication state information for the user identified by the Context and",
"context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId':",
"the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name':",
"and User Ids, e.g. cookies and realm credentials. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/',",
"{'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey}))) def set_user_name(self, contextid, userid, name,",
"Sets fields in the authentication state for the user identified by the Context",
"apikey=''): \"\"\" Tries to poll as the identified user, returning the authentication request",
"set_user_enabled(self, contextid, userid, enabled, apikey=''): \"\"\" Sets whether or not the user, with",
"generated. \"\"\" import six class users(object): def __init__(self, zap): self.zap = zap def",
"userid, authcredentialsconfigparams=None, apikey=''): \"\"\" Sets the authentication credentials for the user with the",
"request and whether it appears to have succeeded. This will only work if",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"the user, with the given ID that belongs to the context with the",
"def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''): \"\"\" Sets the authentication credentials for the",
"userid, 'enabled': enabled, 'apikey': apikey}))) def set_user_name(self, contextid, userid, name, apikey=''): \"\"\" Renames",
"with the given ID that belongs to the context with the given ID,",
"to the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId':",
"contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey}))) def set_user_name(self, contextid, userid, name, apikey=''):",
"\"\"\" Gets the data of the user with the given ID that belongs",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"\"\"\" Removes the user with the given ID that belongs to the context",
"the authentication credentials for the user with the given ID that belongs to",
"= zap def users_list(self, contextid=None): \"\"\" Gets a list of users that belong",
"the context with the given ID, should be enabled. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base +",
"if contextid is not None: params['contextId'] = contextid return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params)))",
"that belongs to the context with the given ID, should be enabled. \"\"\"",
"'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid}))) def get_authentication_credentials(self, contextid, userid): \"\"\" Gets the authentication credentials of",
"if lastpolltimeinms is not None: params['lastPollTimeInMs'] = lastpolltimeinms if requestssincelastpoll is not None:",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params))) def authenticate_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to authenticate",
"'userId': userid}))) def get_authentication_state(self, contextid, userid): \"\"\" Gets the authentication state information for",
"OF ANY KIND, either express or implied. # See the License for the",
"given ID, should be enabled. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId':",
"belongs to the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/',",
"related class files. # # ZAP is an HTTP/HTTPS proxy for assessing web",
"is not None: params['authCredentialsConfigParams'] = authcredentialsconfigparams return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params))) def authenticate_as_user(self,",
"not None: params['contextId'] = contextid return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params))) def get_user_by_id(self, contextid,",
"Creates a new user with the given name for the context with the",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name': name, 'apikey': apikey}))) def",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey}))) def",
"contextid, 'userId': userid, 'apikey': apikey} if lastpollresult is not None: params['lastPollResult'] = lastpollresult",
"the authentication request and whether it appears to have succeeded. This will only",
"+ 'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_user_enabled(self, contextid, userid, enabled,",
"to the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId':",
"'userId': userid, 'apikey': apikey} if authcredentialsconfigparams is not None: params['authCredentialsConfigParams'] = authcredentialsconfigparams return",
"userid): \"\"\" Gets the authentication state information for the user identified by the",
"'apikey': apikey}))) def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''): \"\"\" Sets the authentication credentials",
"{'contextId': contextid, 'userId': userid, 'domain': domain, 'name': name, 'value': value, 'apikey': apikey} if",
"of the user with given ID that belongs to the context with the",
"{'contextId': contextid, 'userId': userid, 'apikey': apikey} if authcredentialsconfigparams is not None: params['authCredentialsConfigParams'] =",
"\"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_authentication_state(self,",
"{'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None,",
"params))) def get_user_by_id(self, contextid, userid): \"\"\" Gets the data of the user with",
"{'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_user_enabled(self, contextid, userid, enabled, apikey=''): \"\"\"",
"def new_user(self, contextid, name, apikey=''): \"\"\" Creates a new user with the given",
"with the given ID, should be enabled. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId':",
"the credentials of the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base +",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid}))) def get_authentication_state(self, contextid, userid): \"\"\" Gets",
"\"\"\" params = {} if contextid is not None: params['contextId'] = contextid return",
"automatically generated. \"\"\" import six class users(object): def __init__(self, zap): self.zap = zap",
"contextid, userid, domain, name, value, path=None, secure=None, apikey=''): \"\"\" Sets the specified cookie",
"requestssincelastpoll return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params))) def set_cookie(self, contextid, userid, domain, name, value,",
"ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def",
"with the given ID. \"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey': apikey}",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid,",
"+ 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid}))) def get_authentication_session(self, contextid, userid): \"\"\" Gets the",
"cookies and realm credentials. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid})))",
"\"\"\" Sets the specified cookie for the user identified by the Context and",
"apikey} if path is not None: params['path'] = path if secure is not",
"the authentication session information for the user identified by the Context and User",
"language governing permissions and # limitations under the License. \"\"\" This file was",
"{} if contextid is not None: params['contextId'] = contextid return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/',",
"with the given ID that belongs to the context with the given ID.",
"given ID that belongs to the context with the given ID, should be",
"the Context and User Ids. \"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey':",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"contextid, userid, apikey=''): \"\"\" Removes the user with the given ID that belongs",
"the given ID. \"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey': apikey} if",
"state for the user identified by the Context and User Ids. \"\"\" params",
"user, with the given ID that belongs to the context with the given",
"ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid, 'userId': userid}))) def get_authentication_credentials_config_params(self, contextid):",
"is an HTTP/HTTPS proxy for assessing web application security. # # Copyright 2017",
"identified by the Context and User Ids. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId':",
"to the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId':",
"polling verification strategy has been configured. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid,",
"under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"authentication credentials of the user with given ID that belongs to the context",
"= {'contextId': contextid, 'userId': userid, 'domain': domain, 'name': name, 'value': value, 'apikey': apikey}",
"contextid, 'userId': userid, 'apikey': apikey}))) def set_user_enabled(self, contextid, userid, enabled, apikey=''): \"\"\" Sets",
"credentials. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid}))) def new_user(self, contextid,",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"# Copyright 2017 the ZAP development team # # Licensed under the Apache",
"user identified by the Context and User Ids, e.g. cookies and realm credentials.",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"the given ID that belongs to the context with the given ID, should",
"not None: params['authCredentialsConfigParams'] = authcredentialsconfigparams return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params))) def authenticate_as_user(self, contextid,",
"the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid})))",
"ID. \"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey': apikey} if authcredentialsconfigparams is",
"required by applicable law or agreed to in writing, software # distributed under",
"def __init__(self, zap): self.zap = zap def users_list(self, contextid=None): \"\"\" Gets a list",
"the user identified by the Context and User Ids. \"\"\" params = {'contextId':",
"applicable law or agreed to in writing, software # distributed under the License",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid}))) def get_authentication_session(self, contextid, userid): \"\"\"",
"will only work if the polling verification strategy has been configured. \"\"\" return",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params))) def set_cookie(self, contextid, userid, domain, name, value, path=None,",
"'enabled': enabled, 'apikey': apikey}))) def set_user_name(self, contextid, userid, name, apikey=''): \"\"\" Renames the",
"and User Ids. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid}))) def",
"User Ids. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid}))) def get_authentication_session(self,",
"Context and User Ids. \"\"\" params = {'contextId': contextid, 'userId': userid, 'domain': domain,",
"or agreed to in writing, software # distributed under the License is distributed",
"the identified user, returning the authentication request and whether it appears to have",
"belongs to the context with the given ID, should be enabled. \"\"\" return",
"CONDITIONS OF ANY KIND, either express or implied. # See the License for",
"the authentication credentials of the user with given ID that belongs to the",
"context with the given ID. \"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey':",
"'users/view/getUserById/', {'contextId': contextid, 'userId': userid}))) def get_authentication_credentials_config_params(self, contextid): \"\"\" Gets the configuration parameters",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid}))) def get_authentication_credentials(self, contextid, userid): \"\"\" Gets the authentication",
"userid, enabled, apikey=''): \"\"\" Sets whether or not the user, with the given",
"userid}))) def new_user(self, contextid, name, apikey=''): \"\"\" Creates a new user with the",
"userid, 'name': name, 'apikey': apikey}))) def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''): \"\"\" Sets",
"userid}))) def get_authentication_session(self, contextid, userid): \"\"\" Gets the authentication session information for the",
"proxy for assessing web application security. # # Copyright 2017 the ZAP development",
"given ID, or all users if none provided. \"\"\" params = {} if",
"poll_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to poll as the identified user, returning",
"under the Apache License, Version 2.0 (the \"License\"); # you may not use",
"userid, apikey=''): \"\"\" Tries to authenticate as the identified user, returning the authentication",
"def remove_user(self, contextid, userid, apikey=''): \"\"\" Removes the user with the given ID",
"= authcredentialsconfigparams return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params))) def authenticate_as_user(self, contextid, userid, apikey=''): \"\"\"",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"configuration parameters for the credentials of the context with the given ID. \"\"\"",
"params['authCredentialsConfigParams'] = authcredentialsconfigparams return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params))) def authenticate_as_user(self, contextid, userid, apikey=''):",
"get_authentication_credentials_config_params(self, contextid): \"\"\" Gets the configuration parameters for the credentials of the context",
"context with the given ID, should be enabled. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/',",
"Tries to authenticate as the identified user, returning the authentication request and whether",
"You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"License. # You may obtain a copy of the License at # #",
"ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey': apikey}))) def",
"belongs to the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/',",
"of the user with the given ID that belongs to the context with",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params))) def get_user_by_id(self, contextid, userid): \"\"\" Gets the data",
"data of the user with the given ID that belongs to the context",
"the authentication state for the user identified by the Context and User Ids.",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name': name, 'apikey': apikey}))) def set_authentication_credentials(self,",
"the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid}))) def get_authentication_credentials(self, contextid,",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def poll_as_user(self, contextid,",
"the context with the given ID. \"\"\" params = {'contextId': contextid, 'userId': userid,",
"the given ID that belongs to the context with the given ID. \"\"\"",
"compliance with the License. # You may obtain a copy of the License",
"+ 'users/view/getUserById/', {'contextId': contextid, 'userId': userid}))) def get_authentication_credentials_config_params(self, contextid): \"\"\" Gets the configuration",
"and # limitations under the License. \"\"\" This file was automatically generated. \"\"\"",
"to have succeeded. This will only work if the polling verification strategy has",
"+ 'users/action/setAuthenticationCredentials/', params))) def authenticate_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to authenticate as",
"'name': name, 'apikey': apikey}))) def remove_user(self, contextid, userid, apikey=''): \"\"\" Removes the user",
"Gets the authentication session information for the user identified by the Context and",
"given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid}))) def get_authentication_credentials(self, contextid, userid):",
"userid, domain, name, value, path=None, secure=None, apikey=''): \"\"\" Sets the specified cookie for",
"Gets the data of the user with the given ID that belongs to",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"get_user_by_id(self, contextid, userid): \"\"\" Gets the data of the user with the given",
"remove_user(self, contextid, userid, apikey=''): \"\"\" Removes the user with the given ID that",
"the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid,",
"for the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId':",
"not None: params['lastPollTimeInMs'] = lastpolltimeinms if requestssincelastpoll is not None: params['requestsSinceLastPoll'] = requestssincelastpoll",
"apikey=''): \"\"\" Removes the user with the given ID that belongs to the",
"authcredentialsconfigparams return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params))) def authenticate_as_user(self, contextid, userid, apikey=''): \"\"\" Tries",
"returning the authentication request and whether it appears to have succeeded. This will",
"poll as the identified user, returning the authentication request and whether it appears",
"domain, name, value, path=None, secure=None, apikey=''): \"\"\" Sets the specified cookie for the",
"'userId': userid, 'enabled': enabled, 'apikey': apikey}))) def set_user_name(self, contextid, userid, name, apikey=''): \"\"\"",
"contextid, userid, enabled, apikey=''): \"\"\" Sets whether or not the user, with the",
"specified cookie for the user identified by the Context and User Ids. \"\"\"",
"with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid})))",
"not use this file except in compliance with the License. # You may",
"the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey':",
"if lastpollresult is not None: params['lastPollResult'] = lastpollresult if lastpolltimeinms is not None:",
"Renames the user with the given ID that belongs to the context with",
"e.g. cookies and realm credentials. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId':",
"contextid, name, apikey=''): \"\"\" Creates a new user with the given name for",
"contextid, userid, name, apikey=''): \"\"\" Renames the user with the given ID that",
"in the authentication state for the user identified by the Context and User",
"if requestssincelastpoll is not None: params['requestsSinceLastPoll'] = requestssincelastpoll return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params)))",
"user identified by the Context and User Ids. \"\"\" params = {'contextId': contextid,",
"License, Version 2.0 (the \"License\"); # you may not use this file except",
"\"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_user_enabled(self,",
"\"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def poll_as_user(self,",
"# Zed Attack Proxy (ZAP) and its related class files. # # ZAP",
"path is not None: params['path'] = path if secure is not None: params['secure']",
"None: params['path'] = path if secure is not None: params['secure'] = secure return",
"Context and User Ids, e.g. cookies and realm credentials. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base +",
"contextid, userid): \"\"\" Gets the authentication session information for the user identified by",
"params['path'] = path if secure is not None: params['secure'] = secure return six.next(six.itervalues(self.zap._request(self.zap.base",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"'userId': userid}))) def new_user(self, contextid, name, apikey=''): \"\"\" Creates a new user with",
"for assessing web application security. # # Copyright 2017 the ZAP development team",
"should be enabled. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled':",
"the specified cookie for the user identified by the Context and User Ids.",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid, 'userId': userid}))) def get_authentication_credentials_config_params(self, contextid): \"\"\" Gets",
"contextid return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params))) def get_user_by_id(self, contextid, userid): \"\"\" Gets the",
"with given ID that belongs to the context with the given ID. \"\"\"",
"if none provided. \"\"\" params = {} if contextid is not None: params['contextId']",
"context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId':",
"of users that belong to the context with the given ID, or all",
"+ 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def poll_as_user(self, contextid, userid, apikey=''):",
"given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid}))) def get_authentication_state(self,",
"the authentication request and whether it appears to have succeeded. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base",
"given ID that belongs to the context with the given ID. \"\"\" params",
"# you may not use this file except in compliance with the License.",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params))) def get_user_by_id(self, contextid, userid): \"\"\" Gets the data of",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid}))) def get_authentication_credentials(self, contextid, userid): \"\"\" Gets the",
"configured. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def",
"apikey}))) def set_user_name(self, contextid, userid, name, apikey=''): \"\"\" Renames the user with the",
"Copyright 2017 the ZAP development team # # Licensed under the Apache License,",
"import six class users(object): def __init__(self, zap): self.zap = zap def users_list(self, contextid=None):",
"agreed to in writing, software # distributed under the License is distributed on",
"def set_user_name(self, contextid, userid, name, apikey=''): \"\"\" Renames the user with the given",
"\"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey': apikey} if authcredentialsconfigparams is not",
"requestssincelastpoll=None, apikey=''): \"\"\" Sets fields in the authentication state for the user identified",
"def set_user_enabled(self, contextid, userid, enabled, apikey=''): \"\"\" Sets whether or not the user,",
"'name': name, 'apikey': apikey}))) def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''): \"\"\" Sets the",
"if the polling verification strategy has been configured. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/',",
"(the \"License\"); # you may not use this file except in compliance with",
"\"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid, 'userId': userid}))) def get_authentication_credentials_config_params(self, contextid): \"\"\"",
"is not None: params['contextId'] = contextid return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params))) def get_user_by_id(self,",
"authentication state for the user identified by the Context and User Ids. \"\"\"",
"strategy has been configured. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid,",
"# Unless required by applicable law or agreed to in writing, software #",
"ZAP is an HTTP/HTTPS proxy for assessing web application security. # # Copyright",
"apikey} if authcredentialsconfigparams is not None: params['authCredentialsConfigParams'] = authcredentialsconfigparams return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/',",
"by applicable law or agreed to in writing, software # distributed under the",
"{'contextId': contextid}))) def get_authentication_credentials(self, contextid, userid): \"\"\" Gets the authentication credentials of the",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"Ids. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid}))) def get_authentication_session(self, contextid,",
"the authentication state information for the user identified by the Context and User",
"{'contextId': contextid, 'userId': userid, 'apikey': apikey} if lastpollresult is not None: params['lastPollResult'] =",
"none provided. \"\"\" params = {} if contextid is not None: params['contextId'] =",
"value, 'apikey': apikey} if path is not None: params['path'] = path if secure",
"users that belong to the context with the given ID, or all users",
"\"\"\" Gets the authentication state information for the user identified by the Context",
"userid, 'apikey': apikey}))) def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''): \"\"\" Sets",
"value, path=None, secure=None, apikey=''): \"\"\" Sets the specified cookie for the user identified",
"given ID that belongs to the context with the given ID. \"\"\" return",
"None: params['lastPollResult'] = lastpollresult if lastpolltimeinms is not None: params['lastPollTimeInMs'] = lastpolltimeinms if",
"lastpolltimeinms if requestssincelastpoll is not None: params['requestsSinceLastPoll'] = requestssincelastpoll return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/',",
"This file was automatically generated. \"\"\" import six class users(object): def __init__(self, zap):",
"file except in compliance with the License. # You may obtain a copy",
"class users(object): def __init__(self, zap): self.zap = zap def users_list(self, contextid=None): \"\"\" Gets",
"to poll as the identified user, returning the authentication request and whether it",
"limitations under the License. \"\"\" This file was automatically generated. \"\"\" import six",
"parameters for the credentials of the context with the given ID. \"\"\" return",
"\"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name': name, 'apikey': apikey})))",
"# ZAP is an HTTP/HTTPS proxy for assessing web application security. # #",
"authenticate_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to authenticate as the identified user, returning",
"name, apikey=''): \"\"\" Renames the user with the given ID that belongs to",
"the given ID, or all users if none provided. \"\"\" params = {}",
"'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_user_enabled(self, contextid, userid, enabled, apikey=''):",
"domain, 'name': name, 'value': value, 'apikey': apikey} if path is not None: params['path']",
"= {'contextId': contextid, 'userId': userid, 'apikey': apikey} if lastpollresult is not None: params['lastPollResult']",
"License for the specific language governing permissions and # limitations under the License.",
"zap): self.zap = zap def users_list(self, contextid=None): \"\"\" Gets a list of users",
"userid, apikey=''): \"\"\" Removes the user with the given ID that belongs to",
"= contextid return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params))) def get_user_by_id(self, contextid, userid): \"\"\" Gets",
"to in writing, software # distributed under the License is distributed on an",
"for the credentials of the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base",
"was automatically generated. \"\"\" import six class users(object): def __init__(self, zap): self.zap =",
"user with the given ID that belongs to the context with the given",
"implied. # See the License for the specific language governing permissions and #",
"context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name':",
"params = {'contextId': contextid, 'userId': userid, 'domain': domain, 'name': name, 'value': value, 'apikey':",
"\"License\"); # you may not use this file except in compliance with the",
"for the user identified by the Context and User Ids. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"contextid, userid, authcredentialsconfigparams=None, apikey=''): \"\"\" Sets the authentication credentials for the user with",
"application security. # # Copyright 2017 the ZAP development team # # Licensed",
"context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid}))) def",
"params['lastPollTimeInMs'] = lastpolltimeinms if requestssincelastpoll is not None: params['requestsSinceLastPoll'] = requestssincelastpoll return six.next(six.itervalues(self.zap._request(self.zap.base",
"\"\"\" Sets the authentication credentials for the user with the given ID that",
"under the License. \"\"\" This file was automatically generated. \"\"\" import six class",
"apikey}))) def poll_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to poll as the identified",
"contextid, 'userId': userid}))) def new_user(self, contextid, name, apikey=''): \"\"\" Creates a new user",
"Tries to poll as the identified user, returning the authentication request and whether",
"Zed Attack Proxy (ZAP) and its related class files. # # ZAP is",
"'userId': userid, 'apikey': apikey}))) def set_user_enabled(self, contextid, userid, enabled, apikey=''): \"\"\" Sets whether",
"apikey=''): \"\"\" Sets the authentication credentials for the user with the given ID",
"or implied. # See the License for the specific language governing permissions and",
"userid): \"\"\" Gets the data of the user with the given ID that",
"2017 the ZAP development team # # Licensed under the Apache License, Version",
"identified by the Context and User Ids, e.g. cookies and realm credentials. \"\"\"",
"= lastpollresult if lastpolltimeinms is not None: params['lastPollTimeInMs'] = lastpolltimeinms if requestssincelastpoll is",
"Sets the authentication credentials for the user with the given ID that belongs",
"contextid=None): \"\"\" Gets a list of users that belong to the context with",
"team # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"of the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId':",
"new user with the given name for the context with the given ID.",
"whether it appears to have succeeded. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid,",
"if path is not None: params['path'] = path if secure is not None:",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"+ 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey}))) def set_user_name(self, contextid,",
"userid, name, apikey=''): \"\"\" Renames the user with the given ID that belongs",
"def get_authentication_state(self, contextid, userid): \"\"\" Gets the authentication state information for the user",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"contextid, 'userId': userid, 'apikey': apikey}))) def poll_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to",
"not the user, with the given ID that belongs to the context with",
"'userId': userid}))) def get_authentication_session(self, contextid, userid): \"\"\" Gets the authentication session information for",
"session information for the user identified by the Context and User Ids, e.g.",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid, 'userId':",
"apikey=''): \"\"\" Creates a new user with the given name for the context",
"ID that belongs to the context with the given ID, should be enabled.",
"in writing, software # distributed under the License is distributed on an \"AS",
"def get_user_by_id(self, contextid, userid): \"\"\" Gets the data of the user with the",
"the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid,",
"the Context and User Ids, e.g. cookies and realm credentials. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base",
"user with given ID that belongs to the context with the given ID.",
"has been configured. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid, 'apikey':",
"is not None: params['path'] = path if secure is not None: params['secure'] =",
"Ids. \"\"\" params = {'contextId': contextid, 'userId': userid, 'domain': domain, 'name': name, 'value':",
"name for the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/',",
"apikey=''): \"\"\" Tries to authenticate as the identified user, returning the authentication request",
"state information for the user identified by the Context and User Ids. \"\"\"",
"and realm credentials. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid}))) def",
"apikey=''): \"\"\" Sets fields in the authentication state for the user identified by",
"# See the License for the specific language governing permissions and # limitations",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"security. # # Copyright 2017 the ZAP development team # # Licensed under",
"users if none provided. \"\"\" params = {} if contextid is not None:",
"permissions and # limitations under the License. \"\"\" This file was automatically generated.",
"to authenticate as the identified user, returning the authentication request and whether it",
"def get_authentication_credentials_config_params(self, contextid): \"\"\" Gets the configuration parameters for the credentials of the",
"\"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid}))) def new_user(self, contextid, name,",
"'userId': userid, 'domain': domain, 'name': name, 'value': value, 'apikey': apikey} if path is",
"to have succeeded. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid, 'apikey':",
"contextid, 'userId': userid, 'apikey': apikey} if authcredentialsconfigparams is not None: params['authCredentialsConfigParams'] = authcredentialsconfigparams",
"'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid}))) def get_authentication_state(self, contextid, userid): \"\"\" Gets the authentication",
"with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid, 'userId': userid})))",
"requestssincelastpoll is not None: params['requestsSinceLastPoll'] = requestssincelastpoll return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params))) def",
"\"\"\" Tries to authenticate as the identified user, returning the authentication request and",
"with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId': userid,",
"with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid}))) def get_authentication_credentials(self,",
"apikey}))) def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''): \"\"\" Sets fields in",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"authentication request and whether it appears to have succeeded. This will only work",
"= lastpolltimeinms if requestssincelastpoll is not None: params['requestsSinceLastPoll'] = requestssincelastpoll return six.next(six.itervalues(self.zap._request(self.zap.base +",
"def set_cookie(self, contextid, userid, domain, name, value, path=None, secure=None, apikey=''): \"\"\" Sets the",
"you may not use this file except in compliance with the License. #",
"\"\"\" Gets a list of users that belong to the context with the",
"userid}))) def get_authentication_state(self, contextid, userid): \"\"\" Gets the authentication state information for the",
"'apikey': apikey} if path is not None: params['path'] = path if secure is",
"contextid, 'userId': userid, 'apikey': apikey}))) def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''):",
"'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def poll_as_user(self, contextid, userid, apikey=''): \"\"\"",
"Gets the authentication credentials of the user with given ID that belongs to",
"# # ZAP is an HTTP/HTTPS proxy for assessing web application security. #",
"params))) def set_cookie(self, contextid, userid, domain, name, value, path=None, secure=None, apikey=''): \"\"\" Sets",
"zap def users_list(self, contextid=None): \"\"\" Gets a list of users that belong to",
"is not None: params['requestsSinceLastPoll'] = requestssincelastpoll return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params))) def set_cookie(self,",
"{'contextId': contextid, 'name': name, 'apikey': apikey}))) def remove_user(self, contextid, userid, apikey=''): \"\"\" Removes",
"or not the user, with the given ID that belongs to the context",
"name, 'apikey': apikey}))) def remove_user(self, contextid, userid, apikey=''): \"\"\" Removes the user with",
"given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))",
"for the user identified by the Context and User Ids. \"\"\" params =",
"authcredentialsconfigparams is not None: params['authCredentialsConfigParams'] = authcredentialsconfigparams return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params))) def",
"Ids. \"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey': apikey} if lastpollresult is",
"use this file except in compliance with the License. # You may obtain",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_user_enabled(self, contextid, userid,",
"'userId': userid, 'apikey': apikey}))) def poll_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to poll",
"Attack Proxy (ZAP) and its related class files. # # ZAP is an",
"for the specific language governing permissions and # limitations under the License. \"\"\"",
"\"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey})))",
"{'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def poll_as_user(self, contextid, userid, apikey=''): \"\"\" Tries",
"authenticate as the identified user, returning the authentication request and whether it appears",
"web application security. # # Copyright 2017 the ZAP development team # #",
"the ZAP development team # # Licensed under the Apache License, Version 2.0",
"contextid, 'userId': userid, 'domain': domain, 'name': name, 'value': value, 'apikey': apikey} if path",
"This will only work if the polling verification strategy has been configured. \"\"\"",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid}))) def get_authentication_state(self, contextid, userid): \"\"\"",
"# limitations under the License. \"\"\" This file was automatically generated. \"\"\" import",
"contextid, userid): \"\"\" Gets the data of the user with the given ID",
"that belong to the context with the given ID, or all users if",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"a new user with the given name for the context with the given",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_user_enabled(self, contextid,",
"authentication state information for the user identified by the Context and User Ids.",
"or all users if none provided. \"\"\" params = {} if contextid is",
"the given name for the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base",
"the configuration parameters for the credentials of the context with the given ID.",
"params = {'contextId': contextid, 'userId': userid, 'apikey': apikey} if authcredentialsconfigparams is not None:",
"\"\"\" Sets fields in the authentication state for the user identified by the",
"params = {'contextId': contextid, 'userId': userid, 'apikey': apikey} if lastpollresult is not None:",
"belongs to the context with the given ID. \"\"\" params = {'contextId': contextid,",
"users_list(self, contextid=None): \"\"\" Gets a list of users that belong to the context",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid, 'userId': userid}))) def get_authentication_credentials_config_params(self, contextid): \"\"\" Gets the",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"\"\"\" Gets the authentication session information for the user identified by the Context",
"ZAP development team # # Licensed under the Apache License, Version 2.0 (the",
"appears to have succeeded. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid,",
"userid, 'apikey': apikey} if authcredentialsconfigparams is not None: params['authCredentialsConfigParams'] = authcredentialsconfigparams return six.next(six.itervalues(self.zap._request(self.zap.base",
"contextid, 'userId': userid, 'name': name, 'apikey': apikey}))) def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''):",
"ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid}))) def get_authentication_credentials(self, contextid, userid): \"\"\"",
"'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid}))) def get_authentication_session(self, contextid, userid): \"\"\" Gets the authentication",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def poll_as_user(self, contextid, userid,",
"if secure is not None: params['secure'] = secure return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setCookie/', params)))",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"\"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid}))) def get_authentication_state(self, contextid, userid):",
"\"\"\" params = {'contextId': contextid, 'userId': userid, 'domain': domain, 'name': name, 'value': value,",
"\"\"\" Gets the configuration parameters for the credentials of the context with the",
"lastpollresult is not None: params['lastPollResult'] = lastpollresult if lastpolltimeinms is not None: params['lastPollTimeInMs']",
"path=None, secure=None, apikey=''): \"\"\" Sets the specified cookie for the user identified by",
"userid): \"\"\" Gets the authentication credentials of the user with given ID that",
"userid, 'apikey': apikey}))) def set_user_enabled(self, contextid, userid, enabled, apikey=''): \"\"\" Sets whether or",
"apikey=''): \"\"\" Sets the specified cookie for the user identified by the Context",
"# # Unless required by applicable law or agreed to in writing, software",
"is not None: params['lastPollResult'] = lastpollresult if lastpolltimeinms is not None: params['lastPollTimeInMs'] =",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey': apikey}))) def remove_user(self, contextid,",
"= requestssincelastpoll return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params))) def set_cookie(self, contextid, userid, domain, name,",
"express or implied. # See the License for the specific language governing permissions",
"ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid}))) def get_authentication_state(self, contextid,",
"userid}))) def get_authentication_credentials_config_params(self, contextid): \"\"\" Gets the configuration parameters for the credentials of",
"License. \"\"\" This file was automatically generated. \"\"\" import six class users(object): def",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey': apikey}))) def remove_user(self, contextid, userid,",
"get_authentication_state(self, contextid, userid): \"\"\" Gets the authentication state information for the user identified",
"the Context and User Ids. \"\"\" params = {'contextId': contextid, 'userId': userid, 'domain':",
"ID, or all users if none provided. \"\"\" params = {} if contextid",
"apikey}))) def set_user_enabled(self, contextid, userid, enabled, apikey=''): \"\"\" Sets whether or not the",
"an HTTP/HTTPS proxy for assessing web application security. # # Copyright 2017 the",
"and User Ids. \"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey': apikey} if",
"to the context with the given ID, should be enabled. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base",
"either express or implied. # See the License for the specific language governing",
"to the context with the given ID, or all users if none provided.",
"User Ids, e.g. cookies and realm credentials. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId':",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid}))) def new_user(self, contextid, name, apikey=''): \"\"\"",
"'users/action/setAuthenticationState/', params))) def set_cookie(self, contextid, userid, domain, name, value, path=None, secure=None, apikey=''): \"\"\"",
"{'contextId': contextid, 'userId': userid}))) def get_authentication_state(self, contextid, userid): \"\"\" Gets the authentication state",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"'apikey': apikey}))) def remove_user(self, contextid, userid, apikey=''): \"\"\" Removes the user with the",
"that belongs to the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base +",
"the user with given ID that belongs to the context with the given",
"that belongs to the context with the given ID. \"\"\" params = {'contextId':",
"secure=None, apikey=''): \"\"\" Sets the specified cookie for the user identified by the",
"{'contextId': contextid, 'userId': userid, 'name': name, 'apikey': apikey}))) def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None,",
"have succeeded. This will only work if the polling verification strategy has been",
"not None: params['requestsSinceLastPoll'] = requestssincelastpoll return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params))) def set_cookie(self, contextid,",
"the specific language governing permissions and # limitations under the License. \"\"\" This",
"contextid, userid): \"\"\" Gets the authentication state information for the user identified by",
"'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name': name, 'apikey': apikey}))) def set_authentication_credentials(self, contextid, userid,",
"the License. # You may obtain a copy of the License at #",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_authentication_state(self, contextid,",
"{'contextId': contextid, 'userId': userid}))) def new_user(self, contextid, name, apikey=''): \"\"\" Creates a new",
"set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''): \"\"\" Sets the authentication credentials for the user",
"'userId': userid}))) def get_authentication_credentials_config_params(self, contextid): \"\"\" Gets the configuration parameters for the credentials",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_authentication_state(self, contextid, userid,",
"Context and User Ids. \"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey': apikey}",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"user, returning the authentication request and whether it appears to have succeeded. This",
"authentication session information for the user identified by the Context and User Ids,",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"name, 'value': value, 'apikey': apikey} if path is not None: params['path'] = path",
"\"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid}))) def get_authentication_credentials(self, contextid, userid): \"\"\" Gets",
"six class users(object): def __init__(self, zap): self.zap = zap def users_list(self, contextid=None): \"\"\"",
"name, 'apikey': apikey}))) def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''): \"\"\" Sets the authentication",
"context with the given ID, or all users if none provided. \"\"\" params",
"given name for the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base +",
"and whether it appears to have succeeded. This will only work if the",
"'userId': userid, 'apikey': apikey} if lastpollresult is not None: params['lastPollResult'] = lastpollresult if",
"for the user with the given ID that belongs to the context with",
"authcredentialsconfigparams=None, apikey=''): \"\"\" Sets the authentication credentials for the user with the given",
"by the Context and User Ids. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid,",
"the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid,",
"apikey}))) def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''): \"\"\" Sets the authentication credentials for",
"the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid,",
"been configured. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))",
"lastpolltimeinms=None, requestssincelastpoll=None, apikey=''): \"\"\" Sets fields in the authentication state for the user",
"fields in the authentication state for the user identified by the Context and",
"params['lastPollResult'] = lastpollresult if lastpolltimeinms is not None: params['lastPollTimeInMs'] = lastpolltimeinms if requestssincelastpoll",
"ID, should be enabled. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid,",
"Gets the configuration parameters for the credentials of the context with the given",
"def poll_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to poll as the identified user,",
"apikey} if lastpollresult is not None: params['lastPollResult'] = lastpollresult if lastpolltimeinms is not",
"and User Ids. \"\"\" params = {'contextId': contextid, 'userId': userid, 'domain': domain, 'name':",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params))) def set_cookie(self, contextid, userid, domain, name, value, path=None, secure=None,",
"with the License. # You may obtain a copy of the License at",
"Removes the user with the given ID that belongs to the context with",
"lastpolltimeinms is not None: params['lastPollTimeInMs'] = lastpolltimeinms if requestssincelastpoll is not None: params['requestsSinceLastPoll']",
"'apikey': apikey}))) def set_user_enabled(self, contextid, userid, enabled, apikey=''): \"\"\" Sets whether or not",
"new_user(self, contextid, name, apikey=''): \"\"\" Creates a new user with the given name",
"by the Context and User Ids. \"\"\" params = {'contextId': contextid, 'userId': userid,",
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"userid, 'apikey': apikey}))) def poll_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to poll as",
"assessing web application security. # # Copyright 2017 the ZAP development team #",
"given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey': apikey})))",
"credentials of the user with given ID that belongs to the context with",
"Proxy (ZAP) and its related class files. # # ZAP is an HTTP/HTTPS",
"specific language governing permissions and # limitations under the License. \"\"\" This file",
"the user identified by the Context and User Ids. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base +",
"list of users that belong to the context with the given ID, or",
"# # Copyright 2017 the ZAP development team # # Licensed under the",
"returning the authentication request and whether it appears to have succeeded. \"\"\" return",
"law or agreed to in writing, software # distributed under the License is",
"\"\"\" Gets the authentication credentials of the user with given ID that belongs",
"(ZAP) and its related class files. # # ZAP is an HTTP/HTTPS proxy",
"the License for the specific language governing permissions and # limitations under the",
"def authenticate_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to authenticate as the identified user,",
"None: params['contextId'] = contextid return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params))) def get_user_by_id(self, contextid, userid):",
"+ 'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_authentication_state(self, contextid, userid, lastpollresult=None,",
"not None: params['lastPollResult'] = lastpollresult if lastpolltimeinms is not None: params['lastPollTimeInMs'] = lastpolltimeinms",
"contextid, 'userId': userid}))) def get_authentication_state(self, contextid, userid): \"\"\" Gets the authentication state information",
"+ 'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey': apikey}))) def remove_user(self, contextid, userid, apikey=''):",
"the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid, 'userId': userid}))) def",
"def get_authentication_credentials(self, contextid, userid): \"\"\" Gets the authentication credentials of the user with",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"belong to the context with the given ID, or all users if none",
"information for the user identified by the Context and User Ids, e.g. cookies",
"path if secure is not None: params['secure'] = secure return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setCookie/',",
"def users_list(self, contextid=None): \"\"\" Gets a list of users that belong to the",
"contextid, 'name': name, 'apikey': apikey}))) def remove_user(self, contextid, userid, apikey=''): \"\"\" Removes the",
"identified by the Context and User Ids. \"\"\" params = {'contextId': contextid, 'userId':",
"ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name': name, 'apikey':",
"\"\"\" Tries to poll as the identified user, returning the authentication request and",
"contextid, userid, apikey=''): \"\"\" Tries to poll as the identified user, returning the",
"users(object): def __init__(self, zap): self.zap = zap def users_list(self, contextid=None): \"\"\" Gets a",
"in compliance with the License. # You may obtain a copy of the",
"'userId': userid, 'name': name, 'apikey': apikey}))) def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''): \"\"\"",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"contextid): \"\"\" Gets the configuration parameters for the credentials of the context with",
"governing permissions and # limitations under the License. \"\"\" This file was automatically",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"ID that belongs to the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base",
"contextid is not None: params['contextId'] = contextid return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params))) def",
"enabled, apikey=''): \"\"\" Sets whether or not the user, with the given ID",
"userid, 'domain': domain, 'name': name, 'value': value, 'apikey': apikey} if path is not",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"user, returning the authentication request and whether it appears to have succeeded. \"\"\"",
"request and whether it appears to have succeeded. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/',",
"See the License for the specific language governing permissions and # limitations under",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"credentials of the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/',",
"enabled, 'apikey': apikey}))) def set_user_name(self, contextid, userid, name, apikey=''): \"\"\" Renames the user",
"'apikey': apikey}))) def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''): \"\"\" Sets fields",
"realm credentials. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid}))) def new_user(self,",
"+ 'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name': name, 'apikey': apikey}))) def set_authentication_credentials(self, contextid,",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"+ 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid}))) def new_user(self, contextid, name, apikey=''): \"\"\" Creates",
"{'contextId': contextid, 'userId': userid}))) def get_authentication_session(self, contextid, userid): \"\"\" Gets the authentication session",
"HTTP/HTTPS proxy for assessing web application security. # # Copyright 2017 the ZAP",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"by the Context and User Ids, e.g. cookies and realm credentials. \"\"\" return",
"apikey=''): \"\"\" Sets whether or not the user, with the given ID that",
"credentials for the user with the given ID that belongs to the context",
"to the context with the given ID. \"\"\" params = {'contextId': contextid, 'userId':",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid}))) def new_user(self, contextid, name, apikey=''):",
"to the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId':",
"params))) def authenticate_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to authenticate as the identified",
"{'contextId': contextid, 'userId': userid}))) def get_authentication_credentials_config_params(self, contextid): \"\"\" Gets the configuration parameters for",
"userid): \"\"\" Gets the authentication session information for the user identified by the",
"params['requestsSinceLastPoll'] = requestssincelastpoll return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params))) def set_cookie(self, contextid, userid, domain,",
"and its related class files. # # ZAP is an HTTP/HTTPS proxy for",
"\"\"\" Creates a new user with the given name for the context with",
"apikey}))) def remove_user(self, contextid, userid, apikey=''): \"\"\" Removes the user with the given",
"have succeeded. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))",
"be enabled. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled': enabled,",
"set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''): \"\"\" Sets fields in the authentication",
"the Context and User Ids. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId':",
"succeeded. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid}))) def get_authentication_session(self, contextid, userid): \"\"\" Gets",
"set_cookie(self, contextid, userid, domain, name, value, path=None, secure=None, apikey=''): \"\"\" Sets the specified",
"provided. \"\"\" params = {} if contextid is not None: params['contextId'] = contextid",
"\"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid}))) def get_authentication_session(self, contextid, userid):",
"the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid}))) def",
"only work if the polling verification strategy has been configured. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base",
"work if the polling verification strategy has been configured. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base +",
"+ 'users/action/setAuthenticationState/', params))) def set_cookie(self, contextid, userid, domain, name, value, path=None, secure=None, apikey=''):",
"Context and User Ids. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid})))",
"userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''): \"\"\" Sets fields in the authentication state for",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"except in compliance with the License. # You may obtain a copy of",
"information for the user identified by the Context and User Ids. \"\"\" return",
"belongs to the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/',",
"six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey}))) def set_user_name(self,",
"lastpollresult if lastpolltimeinms is not None: params['lastPollTimeInMs'] = lastpolltimeinms if requestssincelastpoll is not",
"\"\"\" Sets whether or not the user, with the given ID that belongs",
"'domain': domain, 'name': name, 'value': value, 'apikey': apikey} if path is not None:",
"a list of users that belong to the context with the given ID,",
"Sets whether or not the user, with the given ID that belongs to",
"\"\"\" This file was automatically generated. \"\"\" import six class users(object): def __init__(self,",
"contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''): \"\"\" Sets fields in the authentication state",
"# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"\"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey': apikey}))) def remove_user(self,",
"None: params['requestsSinceLastPoll'] = requestssincelastpoll return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params))) def set_cookie(self, contextid, userid,",
"contextid, 'userId': userid}))) def get_authentication_credentials_config_params(self, contextid): \"\"\" Gets the configuration parameters for the",
"the user identified by the Context and User Ids, e.g. cookies and realm",
"params['contextId'] = contextid return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params))) def get_user_by_id(self, contextid, userid): \"\"\"",
"'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey}))) def set_user_name(self, contextid, userid,",
"with the given ID, or all users if none provided. \"\"\" params =",
"'userId': userid, 'apikey': apikey}))) def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''): \"\"\"",
"class files. # # ZAP is an HTTP/HTTPS proxy for assessing web application",
"'apikey': apikey}))) def poll_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to poll as the",
"is not None: params['lastPollTimeInMs'] = lastpolltimeinms if requestssincelastpoll is not None: params['requestsSinceLastPoll'] =",
"return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params))) def authenticate_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to",
"set_user_name(self, contextid, userid, name, apikey=''): \"\"\" Renames the user with the given ID",
"= {'contextId': contextid, 'userId': userid, 'apikey': apikey} if authcredentialsconfigparams is not None: params['authCredentialsConfigParams']",
"its related class files. # # ZAP is an HTTP/HTTPS proxy for assessing",
"given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid, 'userId': userid}))) def get_authentication_credentials_config_params(self,",
"belongs to the context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/',",
"whether it appears to have succeeded. This will only work if the polling",
"get_authentication_session(self, contextid, userid): \"\"\" Gets the authentication session information for the user identified",
"the License. \"\"\" This file was automatically generated. \"\"\" import six class users(object):",
"Gets a list of users that belong to the context with the given",
"given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name': name,",
"'users/action/setAuthenticationCredentials/', params))) def authenticate_as_user(self, contextid, userid, apikey=''): \"\"\" Tries to authenticate as the",
"the context with the given ID, or all users if none provided. \"\"\"",
"the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey':",
"the user with the given ID that belongs to the context with the",
"given ID. \"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey': apikey} if authcredentialsconfigparams",
"it appears to have succeeded. This will only work if the polling verification",
"file was automatically generated. \"\"\" import six class users(object): def __init__(self, zap): self.zap",
"user with the given name for the context with the given ID. \"\"\"",
"if authcredentialsconfigparams is not None: params['authCredentialsConfigParams'] = authcredentialsconfigparams return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params)))",
"+ 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid}))) def get_authentication_credentials(self, contextid, userid): \"\"\" Gets the authentication credentials",
"\"\"\" Renames the user with the given ID that belongs to the context",
"cookie for the user identified by the Context and User Ids. \"\"\" params",
"contextid, userid, apikey=''): \"\"\" Tries to authenticate as the identified user, returning the",
"context with the given ID. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId':",
"ID that belongs to the context with the given ID. \"\"\" params =",
"user identified by the Context and User Ids. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/',",
"with the given name for the context with the given ID. \"\"\" return",
"lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''): \"\"\" Sets fields in the authentication state for the",
"name, apikey=''): \"\"\" Creates a new user with the given name for the",
"'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey': apikey}))) def remove_user(self, contextid, userid, apikey=''): \"\"\"",
"def get_authentication_session(self, contextid, userid): \"\"\" Gets the authentication session information for the user",
"\"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey': apikey} if lastpollresult is not",
"enabled. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey':",
"appears to have succeeded. This will only work if the polling verification strategy",
"the data of the user with the given ID that belongs to the",
"'value': value, 'apikey': apikey} if path is not None: params['path'] = path if",
"the polling verification strategy has been configured. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId':",
"files. # # ZAP is an HTTP/HTTPS proxy for assessing web application security.",
"get_authentication_credentials(self, contextid, userid): \"\"\" Gets the authentication credentials of the user with given",
"succeeded. This will only work if the polling verification strategy has been configured.",
"'apikey': apikey} if lastpollresult is not None: params['lastPollResult'] = lastpollresult if lastpolltimeinms is",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"authentication credentials for the user with the given ID that belongs to the",
"None: params['lastPollTimeInMs'] = lastpolltimeinms if requestssincelastpoll is not None: params['requestsSinceLastPoll'] = requestssincelastpoll return",
"User Ids. \"\"\" params = {'contextId': contextid, 'userId': userid, 'apikey': apikey} if lastpollresult",
"not None: params['path'] = path if secure is not None: params['secure'] = secure",
"Ids, e.g. cookies and realm credentials. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid,",
"contextid, 'userId': userid}))) def get_authentication_session(self, contextid, userid): \"\"\" Gets the authentication session information",
"and whether it appears to have succeeded. \"\"\" return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId':",
"'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey}))) def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None,",
"+ 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid}))) def get_authentication_state(self, contextid, userid): \"\"\" Gets the",
"User Ids. \"\"\" params = {'contextId': contextid, 'userId': userid, 'domain': domain, 'name': name,",
"'apikey': apikey} if authcredentialsconfigparams is not None: params['authCredentialsConfigParams'] = authcredentialsconfigparams return six.next(six.itervalues(self.zap._request(self.zap.base +"
] |
[
"]) # the left `None` becomes an empty string # the right `None`",
"'2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', '2018-01-01', \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'),",
"'Dog'], ]) self.set_values([ [1, 2], [2, 3], [3, 4] ]) self.assert_matches( [ ['Foo',",
"if self.headers_written: row_offset = self.headers_written for row_index, row in enumerate(values, start=row_offset): for col_index,",
"def test_expected_rows(self): self.expect_table_header((('Created', 'Due Date', 'Start Time'), )) self.expect_table_contents((('01/01/2018 05:30 AM', '05/31/2019', '01:30",
"[3, 4] ]) self.assert_matches( [ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ], [ [1, 2],",
"self.assert_matches([], [ [None, 1, 1.23, 'hello'] ]) class TestGridBase(testing.GridBase): grid_cls = TemporalGrid sort_tests",
"test_headers_and_rows(self): self.set_headers(['Foo', 'Bar']) self.set_values([ [1, 2], [2, 3], [3, 4] ]) self.assert_matches( ['Foo',",
"persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) def setup_method(self, _): Person.delete_cascaded() Person.testing_create( createdts=dt.datetime(2018, 1,",
"['Snoopy', 'Dog'], ]) self.set_values([ [1, 2], [2, 3], [3, 4] ]) self.assert_matches( [",
"def test_different_lengths(self): with pytest.raises(AssertionError): testing.assert_list_equal([], [1]) with pytest.raises(AssertionError): testing.assert_list_equal([1], []) def test_different_elements(self): with",
"for col_index, value in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xls_headers, xls_rows): self.workbook.save(self.stream)",
"2], [2, 3], [3, 4] ]) self.assert_matches( ['Foo', 'Bar'], [ [1, 2], [2,",
"with pytest.raises(AssertionError): testing.assert_list_equal([], [1]) with pytest.raises(AssertionError): testing.assert_list_equal([1], []) def test_different_elements(self): with pytest.raises(AssertionError): testing.assert_list_equal([1,",
"= ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if",
"[], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers(['Foo']) self.assert_matches(['Foo'], []) def test_multiple_headers(self):",
"[2, 3], [3, 4] ]) self.assert_matches( ['Foo', 'Bar'], [ [1, 2], [2, 3],",
"2, 4]) def test_order_is_significant(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [2, 3, 1]) def",
"headers): for index, header in enumerate(headers): self.sheet.write(0, index, header) self.headers_written = True def",
"self.set_headers([['Foo']]) self.assert_matches([['Foo']], []) def test_multiple_headers(self): self.set_headers([['Foo', 'Bar']]) self.assert_matches([['Foo', 'Bar']], []) def test_single_row(self): self.set_values([[1,",
"def test_different_elements(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [1, 2, 4]) def test_order_is_significant(self): with",
"with pytest.raises(AssertionError): testing.assert_list_equal([1], []) def test_different_elements(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [1, 2,",
"testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows) def test_empty_xlsx(self): with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None,",
"@property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1, 1, 5, 30), \"WHERE",
"an empty string # the right `None` gets dropped self.assert_matches([], [ ['', 1,",
"[ [1, 2, 3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers([ ['Foo', 'Bar'],",
"5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', '2018-01-01', \"WHERE persons.due_date =",
"testing.assert_list_equal((x for x in range(3)), [0, 1, 2]) testing.assert_list_equal([0, 1, 2], (x for",
"'Bar'], ['Snoopy', 'Dog'], ]) self.set_values([ [1, 2], [2, 3], [3, 4] ]) self.assert_matches(",
"set_values(self, values): row_offset = 0 if self.headers_written: row_offset = 1 for row_index, row",
"1]) def test_generators(self): testing.assert_list_equal((x for x in range(3)), (x for x in range(3)))",
"4] ]) self.assert_matches( [ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ], [ [1, 2], [2,",
"\"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME WITHOUT TIME ZONE)\"), ) class TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls",
"xlsx_rows): self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows) def test_empty_xlsx(self): with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'', None, None) with",
"self.set_headers(['Foo', 'Bar']) self.assert_matches(['Foo', 'Bar'], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2,",
"testing.assert_list_equal([1, 2, 3], [1, 2, 4]) def test_order_is_significant(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3],",
"]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None] ]) # the left",
"import RadioGrid, TemporalGrid from webgrid_ta.model.entities import Person, db def setup_module(): import flask assert",
"index, header) self.headers_written = True def set_values(self, values): row_offset = 0 if self.headers_written:",
"test_simple_equivalents(self): testing.assert_list_equal([], []) testing.assert_list_equal([1, 2, 3], [1, 2, 3]) testing.assert_list_equal((1, 2, 3), [1,",
"'Due Date', 'Start Time'), )) self.expect_table_contents((('01/01/2018 05:30 AM', '05/31/2019', '01:30 AM'), )) class",
"('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'postgresql': pytest.skip('postgres-only",
"'hello', None, True, False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None]",
"TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls = RadioGrid @property def filters(self): return ( ('make', 'eq', 'foo', \"WHERE",
"dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) def setup_method(self, _): Person.delete_cascaded()",
"import flask assert not flask.request class TestAssertListEqual: \"\"\"Verify the `assert_list_equal` method performs as",
"xlwt.Workbook() self.sheet = self.workbook.add_sheet('sheet1') self.stream = BytesIO() self.headers_written = False def set_headers(self, headers):",
"self.headers_written = None def test_openpyxl_requirement(self): with mock.patch('webgrid.testing.openpyxl', None): with pytest.raises(Exception, match=r'openpyxl is required.*'):",
"def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None] ]) # the right `None`",
"'sqlite': pytest.skip('sqlite-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1, 1,",
"= '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), )",
"[1, 2], [2, 3], [3, 4] ]) self.assert_matches( [ ['Foo', 'Bar'], ['Snoopy', 'Dog'],",
"test_single_header(self): self.set_headers(['Foo']) self.assert_matches(['Foo'], []) def test_multiple_headers(self): self.set_headers(['Foo', 'Bar']) self.assert_matches(['Foo', 'Bar'], []) def test_single_row(self):",
"TIME)\"), ) def setup_method(self, _): Person.delete_cascaded() Person.testing_create( createdts=dt.datetime(2018, 1, 1, 5, 30), due_date=dt.date(2019,",
"testing.assert_list_equal([1, 2, 3], [1, 2, 3]) testing.assert_list_equal((1, 2, 3), [1, 2, 3]) testing.assert_list_equal('123',",
"with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None,",
"self.headers_written = True def set_values(self, values): row_offset = 0 if self.headers_written: row_offset =",
"@property def filters(self): return ( ('make', 'eq', 'foo', \"WHERE sabwp_radios.make = 'foo'\"), ('model',",
"3), [1, 2, 3]) testing.assert_list_equal('123', '123') def test_different_lengths(self): with pytest.raises(AssertionError): testing.assert_list_equal([], [1]) with",
"[1, 2, 3]) testing.assert_list_equal((1, 2, 3), [1, 2, 3]) testing.assert_list_equal('123', '123') def test_different_lengths(self):",
"RadioGrid @property def filters(self): return ( ('make', 'eq', 'foo', \"WHERE sabwp_radios.make = 'foo'\"),",
"None self.set_values(headers) self.headers_written = len(headers) def set_values(self, values): row_offset = 0 if self.headers_written:",
"class TestAssertRenderedXlsxMatches: def setup(self): self.stream = BytesIO() self.workbook = xlsxwriter.Workbook(self.stream, options={'in_memory': True}) self.sheet",
"1.23, 'hello'] ]) class TestGridBase(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'),",
"]) self.assert_matches([], [ [1, 1.23, 'hello', '', True, False] ]) def test_none_is_mangled(self): self.set_values([",
"with mock.patch('webgrid.testing.openpyxl', None): with pytest.raises(Exception, match=r'openpyxl is required.*'): self.assert_matches([], []) def set_headers(self, headers):",
"flask.request class TestAssertListEqual: \"\"\"Verify the `assert_list_equal` method performs as expected\"\"\" def test_simple_equivalents(self): testing.assert_list_equal([],",
"[2, 3], [3, 4] ] ) def test_value_types(self): self.set_values([ [1, 1.23, 'hello', None,",
"[2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers([ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ]) self.set_values([",
"= 'foo'\"), ('year', 'eq', '1945', \"WHERE sabwp_radios.year = 1945\"), ) @classmethod def setup_class(cls):",
"mock import pytest import xlsxwriter import xlwt from webgrid import testing from webgrid_ta.grids",
"= RadioGrid @property def filters(self): return ( ('make', 'eq', 'foo', \"WHERE sabwp_radios.make =",
"pytest.skip('sql server-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1, 1,",
"with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [2, 3, 1]) def test_generators(self): testing.assert_list_equal((x for x",
"TestGridBase(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'),",
"for x in range(3)), [0, 1, 2]) testing.assert_list_equal([0, 1, 2], (x for x",
"[]) def test_multiple_headers(self): self.set_headers([['Foo', 'Bar']]) self.assert_matches([['Foo', 'Bar']], []) def test_single_row(self): self.set_values([[1, 2, 3]])",
"1.23, 'hello', None, True, False] ]) self.assert_matches([], [ [1, 1.23, 'hello', '', True,",
"'eq', '1945', \"WHERE sabwp_radios.year = 1945\"), ) @classmethod def setup_class(cls): if db.engine.dialect.name !=",
"2]) testing.assert_list_equal([0, 1, 2], (x for x in range(3))) class TestAssertRenderedXlsMatches: def setup(self):",
"None): with pytest.raises(Exception, match=r'openpyxl is required.*'): self.assert_matches([], []) def set_headers(self, headers): assert self.headers_written",
"xlsx_headers, xlsx_rows): self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows) def test_empty_xlsx(self): with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'', None, None)",
"BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) def setup_method(self, _): Person.delete_cascaded() Person.testing_create( createdts=dt.datetime(2018, 1, 1,",
"row_offset = 0 if self.headers_written: row_offset = 1 for row_index, row in enumerate(values,",
"col_index, value) def assert_matches(self, xls_headers, xls_rows): self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows) def test_empty_xls(self): with",
"self.assert_matches([], [ [1, 1.23, 'hello', '', True, False] ]) def test_none_is_mangled(self): self.set_values([ [None,",
"import Person, db def setup_module(): import flask assert not flask.request class TestAssertListEqual: \"\"\"Verify",
"testing.assert_rendered_xls_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers(['Foo']) self.assert_matches(['Foo'], []) def",
"[1, 2], [2, 3], [3, 4] ]) self.assert_matches( ['Foo', 'Bar'], [ [1, 2],",
"( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name",
")) class TestGridBasePG(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'),",
"assert_matches(self, xls_headers, xls_rows): self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows) def test_empty_xls(self): with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'', None,",
"Person.delete_cascaded() Person.testing_create( createdts=dt.datetime(2018, 1, 1, 5, 30), due_date=dt.date(2019, 5, 31), start_time=dt.time(1, 30), )",
"None, True, False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None] ])",
"] ) def test_value_types(self): self.set_values([ [1, 1.23, 'hello', None, True, False] ]) self.assert_matches([],",
"@classmethod def setup_class(cls): if db.engine.dialect.name != 'sqlite': pytest.skip('sqlite-only test') @property def filters(self): return",
"self.workbook = xlwt.Workbook() self.sheet = self.workbook.add_sheet('sheet1') self.stream = BytesIO() self.headers_written = False def",
"x in range(3))) class TestAssertRenderedXlsMatches: def setup(self): self.workbook = xlwt.Workbook() self.sheet = self.workbook.add_sheet('sheet1')",
"pytest.raises(Exception, match=r'openpyxl is required.*'): self.assert_matches([], []) def set_headers(self, headers): assert self.headers_written is None",
"3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers([ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ])",
"'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'postgresql': pytest.skip('postgres-only test') @property def",
"05:30:00.000000'\"), ('due_date', 'eq', dt.date(2018, 1, 1), \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1,",
"dropped self.assert_matches([], [ ['', 1, 1.23, 'hello'] ]) class TestAssertRenderedXlsxMatches: def setup(self): self.stream",
"2, 3]]) def test_multiple_rows(self): self.set_values([ [1, 2, 3], [2, 3, 4] ]) self.assert_matches([],",
"False] ]) self.assert_matches([], [ [1, 1.23, 'hello', '', True, False] ]) def test_none_is_mangled(self):",
"True, False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None] ]) #",
"('year', 'eq', '1945', \"WHERE sabwp_radios.year = 1945\"), ) @classmethod def setup_class(cls): if db.engine.dialect.name",
"]) self.assert_matches( ['Foo', 'Bar'], [ [1, 2], [2, 3], [3, 4] ] )",
"TestAssertListEqual: \"\"\"Verify the `assert_list_equal` method performs as expected\"\"\" def test_simple_equivalents(self): testing.assert_list_equal([], []) testing.assert_list_equal([1,",
"dt from io import BytesIO from unittest import mock import pytest import xlsxwriter",
"'2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) class",
"'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'sqlite':",
"[1, 1.23, 'hello', None, True, False] ]) self.assert_matches([], [ [1, 1.23, 'hello', '',",
"None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self):",
"('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'sqlite': pytest.skip('sqlite-only",
"test_value_types(self): self.set_values([ [1, 1.23, 'hello', None, True, False] ]) self.assert_matches([], [ [1, 1.23,",
"'05/31/2019', '01:30 AM'), )) class TestGridBasePG(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts',",
"test_empty_xls(self): with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, None, None) with pytest.raises(AssertionError):",
"self.set_values([ [1, 2], [2, 3], [3, 4] ]) self.assert_matches( [ ['Foo', 'Bar'], ['Snoopy',",
"self.set_values([ [None, 1, 1.23, 'hello', None] ]) # the left `None` becomes an",
"3], [3, 4] ]) self.assert_matches( [ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ], [ [1,",
"with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None,",
"Person, db def setup_module(): import flask assert not flask.request class TestAssertListEqual: \"\"\"Verify the",
"self.assert_matches([], [[1, 2, 3]]) def test_multiple_rows(self): self.set_values([ [1, 2, 3], [2, 3, 4]",
"range(3)), (x for x in range(3))) testing.assert_list_equal((x for x in range(3)), [0, 1,",
"x in range(3))) testing.assert_list_equal((x for x in range(3)), [0, 1, 2]) testing.assert_list_equal([0, 1,",
"dt.datetime(2018, 1, 1, 5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', '2018-01-01',",
"TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def",
"'mssql': pytest.skip('sql server-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1,",
"]) class TestGridBase(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'),",
"from webgrid_ta.grids import RadioGrid, TemporalGrid from webgrid_ta.model.entities import Person, db def setup_module(): import",
"row_offset = self.headers_written for row_index, row in enumerate(values, start=row_offset): for col_index, value in",
"self.stream = BytesIO() self.headers_written = False def set_headers(self, headers): for index, header in",
"4]) def test_order_is_significant(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [2, 3, 1]) def test_generators(self):",
"[0, 1, 2]) testing.assert_list_equal([0, 1, 2], (x for x in range(3))) class TestAssertRenderedXlsMatches:",
"with pytest.raises(Exception, match=r'openpyxl is required.*'): self.assert_matches([], []) def set_headers(self, headers): assert self.headers_written is",
"[2, 3, 4] ]) self.assert_matches([], [ [1, 2, 3], [2, 3, 4] ])",
"1.23, 'hello', None, True, False] ]) self.assert_matches([], [ [1, 1.23, 'hello', None, True,",
"0 if self.headers_written: row_offset = self.headers_written for row_index, row in enumerate(values, start=row_offset): for",
"test_generators(self): testing.assert_list_equal((x for x in range(3)), (x for x in range(3))) testing.assert_list_equal((x for",
"'hello', None] ]) # the left `None` becomes an empty string # the",
"left `None` becomes an empty string # the right `None` gets dropped self.assert_matches([],",
"30), due_date=dt.date(2019, 5, 31), start_time=dt.time(1, 30), ) def test_expected_rows(self): self.expect_table_header((('Created', 'Due Date', 'Start",
"('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name !=",
"1, 1.23, 'hello', None] ]) # the left `None` becomes an empty string",
"1.23, 'hello', '', True, False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello',",
"1, 1, 5, 30), due_date=dt.date(2019, 5, 31), start_time=dt.time(1, 30), ) def test_expected_rows(self): self.expect_table_header((('Created',",
"TIME WITHOUT TIME ZONE)\"), ) class TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls = TemporalGrid sort_tests = (",
"4] ] ) def test_value_types(self): self.set_values([ [1, 1.23, 'hello', None, True, False] ])",
"= BytesIO() self.workbook = xlsxwriter.Workbook(self.stream, options={'in_memory': True}) self.sheet = self.workbook.add_worksheet('sheet1') self.headers_written = None",
"'01:30 AM'), )) class TestGridBasePG(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'),",
"1, 1.23, 'hello'] ]) class TestGridBase(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts',",
"= xlsxwriter.Workbook(self.stream, options={'in_memory': True}) self.sheet = self.workbook.add_worksheet('sheet1') self.headers_written = None def test_openpyxl_requirement(self): with",
"setup_class(cls): if db.engine.dialect.name != 'postgresql': pytest.skip('postgres-only test') @property def filters(self): return ( ('createdts',",
"'postgresql': pytest.skip('postgres-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1, 1,",
"'foo'\"), ('model', 'eq', 'foo', \"WHERE sabwp_radios.model = 'foo'\"), ('year', 'eq', '1945', \"WHERE sabwp_radios.year",
"gets dropped self.assert_matches([], [ [None, 1, 1.23, 'hello'] ]) class TestGridBase(testing.GridBase): grid_cls =",
"# the left `None` becomes an empty string # the right `None` gets",
"self.assert_matches([], [ [1, 2, 3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers([ ['Foo',",
"[1, 2, 3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers(['Foo', 'Bar']) self.set_values([ [1,",
"db.engine.dialect.name != 'postgresql': pytest.skip('postgres-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018,",
"test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1, 1, 5, 30),",
"'foo', \"WHERE sabwp_radios.make = 'foo'\"), ('model', 'eq', 'foo', \"WHERE sabwp_radios.model = 'foo'\"), ('year',",
"CAST('01:30:00.000000' AS TIME WITHOUT TIME ZONE)\"), ) class TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls = TemporalGrid sort_tests",
"class TestAssertListEqual: \"\"\"Verify the `assert_list_equal` method performs as expected\"\"\" def test_simple_equivalents(self): testing.assert_list_equal([], [])",
"4] ]) self.assert_matches([], [ [1, 2, 3], [2, 3, 4] ]) def test_headers_and_rows(self):",
"2], [2, 3], [3, 4] ] ) def test_value_types(self): self.set_values([ [1, 1.23, 'hello',",
"31), start_time=dt.time(1, 30), ) def test_expected_rows(self): self.expect_table_header((('Created', 'Due Date', 'Start Time'), )) self.expect_table_contents((('01/01/2018",
"def test_empty_xlsx(self): with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, None, None) with",
"TestAssertRenderedXlsMatches: def setup(self): self.workbook = xlwt.Workbook() self.sheet = self.workbook.add_sheet('sheet1') self.stream = BytesIO() self.headers_written",
"2, 3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers([ ['Foo', 'Bar'], ['Snoopy', 'Dog'],",
"def setup_module(): import flask assert not flask.request class TestAssertListEqual: \"\"\"Verify the `assert_list_equal` method",
"start_time=dt.time(1, 30), ) def test_expected_rows(self): self.expect_table_header((('Created', 'Due Date', 'Start Time'), )) self.expect_table_contents((('01/01/2018 05:30",
"def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers(['Foo']) self.assert_matches(['Foo'], []) def test_multiple_headers(self): self.set_headers(['Foo', 'Bar'])",
"with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([],",
"value in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xlsx_headers, xlsx_rows): self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers,",
"sabwp_radios.model = 'foo'\"), ('year', 'eq', '1945', \"WHERE sabwp_radios.year = 1945\"), ) @classmethod def",
"1, 2], (x for x in range(3))) class TestAssertRenderedXlsMatches: def setup(self): self.workbook =",
"value in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xls_headers, xls_rows): self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers,",
"[1, 1.23, 'hello', None, True, False] ]) self.assert_matches([], [ [1, 1.23, 'hello', None,",
"'123') def test_different_lengths(self): with pytest.raises(AssertionError): testing.assert_list_equal([], [1]) with pytest.raises(AssertionError): testing.assert_list_equal([1], []) def test_different_elements(self):",
"self.assert_matches(['Foo', 'Bar'], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2, 3]]) def",
"1, 5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', '2018-01-01', \"WHERE persons.due_date",
"self.assert_matches(['Foo'], []) def test_multiple_headers(self): self.set_headers(['Foo', 'Bar']) self.assert_matches(['Foo', 'Bar'], []) def test_single_row(self): self.set_values([[1, 2,",
"3, 4] ]) def test_headers_and_rows(self): self.set_headers(['Foo', 'Bar']) self.set_values([ [1, 2], [2, 3], [3,",
"'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'mssql': pytest.skip('sql server-only",
"testing.assert_list_equal((1, 2, 3), [1, 2, 3]) testing.assert_list_equal('123', '123') def test_different_lengths(self): with pytest.raises(AssertionError): testing.assert_list_equal([],",
"2, 3]]) self.assert_matches([], [[1, 2, 3]]) def test_multiple_rows(self): self.set_values([ [1, 2, 3], [2,",
"setup(self): self.stream = BytesIO() self.workbook = xlsxwriter.Workbook(self.stream, options={'in_memory': True}) self.sheet = self.workbook.add_worksheet('sheet1') self.headers_written",
"3], [1, 2, 3]) testing.assert_list_equal((1, 2, 3), [1, 2, 3]) testing.assert_list_equal('123', '123') def",
"test_expected_rows(self): self.expect_table_header((('Created', 'Due Date', 'Start Time'), )) self.expect_table_contents((('01/01/2018 05:30 AM', '05/31/2019', '01:30 AM'),",
"('due_date', 'eq', '2018-01-01', \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time",
"= BytesIO() self.headers_written = False def set_headers(self, headers): for index, header in enumerate(headers):",
"[]) def test_single_header(self): self.set_headers(['Foo']) self.assert_matches(['Foo'], []) def test_multiple_headers(self): self.set_headers(['Foo', 'Bar']) self.assert_matches(['Foo', 'Bar'], [])",
"'Bar']) self.set_values([ [1, 2], [2, 3], [3, 4] ]) self.assert_matches( ['Foo', 'Bar'], [",
"\"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', dt.date(2018, 1, 1), \"WHERE persons.due_date =",
"('model', 'eq', 'foo', \"WHERE sabwp_radios.model = 'foo'\"), ('year', 'eq', '1945', \"WHERE sabwp_radios.year =",
"TestAssertRenderedXlsxMatches: def setup(self): self.stream = BytesIO() self.workbook = xlsxwriter.Workbook(self.stream, options={'in_memory': True}) self.sheet =",
"enumerate(headers): self.sheet.write(0, index, header) self.headers_written = True def set_values(self, values): row_offset = 0",
"x in range(3)), (x for x in range(3))) testing.assert_list_equal((x for x in range(3)),",
"WITHOUT TIME ZONE)\"), ) class TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts',",
"CAST('01:30:00.000000' AS TIME)\"), ) class TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls = RadioGrid @property def filters(self): return",
"for row_index, row in enumerate(values, start=row_offset): for col_index, value in enumerate(row): self.sheet.write(row_index, col_index,",
"xls_headers, xls_rows) def test_empty_xls(self): with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, None,",
"= 0 if self.headers_written: row_offset = self.headers_written for row_index, row in enumerate(values, start=row_offset):",
"False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None] ]) # the",
"2, 3], [2, 3, 1]) def test_generators(self): testing.assert_list_equal((x for x in range(3)), (x",
"`None` gets dropped self.assert_matches([], [ [None, 1, 1.23, 'hello'] ]) class TestGridBase(testing.GridBase): grid_cls",
"`None` gets dropped self.assert_matches([], [ ['', 1, 1.23, 'hello'] ]) class TestAssertRenderedXlsxMatches: def",
"in range(3)), (x for x in range(3))) testing.assert_list_equal((x for x in range(3)), [0,",
"[ ['', 1, 1.23, 'hello'] ]) class TestAssertRenderedXlsxMatches: def setup(self): self.stream = BytesIO()",
"3], [3, 4] ]) self.assert_matches( ['Foo', 'Bar'], [ [1, 2], [2, 3], [3,",
"expected\"\"\" def test_simple_equivalents(self): testing.assert_list_equal([], []) testing.assert_list_equal([1, 2, 3], [1, 2, 3]) testing.assert_list_equal((1, 2,",
"[1, 2, 3]) testing.assert_list_equal('123', '123') def test_different_lengths(self): with pytest.raises(AssertionError): testing.assert_list_equal([], [1]) with pytest.raises(AssertionError):",
"pytest.raises(AssertionError): testing.assert_list_equal([1], []) def test_different_elements(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [1, 2, 4])",
"testing.assert_list_equal([1], []) def test_different_elements(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [1, 2, 4]) def",
"row in enumerate(values, start=row_offset): for col_index, value in enumerate(row): self.sheet.write(row_index, col_index, value) def",
"in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xls_headers, xls_rows): self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows)",
"test_multiple_headers(self): self.set_headers(['Foo', 'Bar']) self.assert_matches(['Foo', 'Bar'], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1,",
"self.assert_matches([], [ [1, 1.23, 'hello', None, True, False] ]) def test_none_is_mangled(self): self.set_values([ [None,",
"performs as expected\"\"\" def test_simple_equivalents(self): testing.assert_list_equal([], []) testing.assert_list_equal([1, 2, 3], [1, 2, 3])",
"'Bar']) self.assert_matches(['Foo', 'Bar'], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2, 3]])",
"[]) def set_headers(self, headers): assert self.headers_written is None self.set_values(headers) self.headers_written = len(headers) def",
"enumerate(values, start=row_offset): for col_index, value in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xlsx_headers,",
"in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xlsx_headers, xlsx_rows): self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows)",
"True, False] ]) self.assert_matches([], [ [1, 1.23, 'hello', None, True, False] ]) def",
"1, 2]) testing.assert_list_equal([0, 1, 2], (x for x in range(3))) class TestAssertRenderedXlsMatches: def",
"testing.assert_list_equal([], []) testing.assert_list_equal([1, 2, 3], [1, 2, 3]) testing.assert_list_equal((1, 2, 3), [1, 2,",
"2, 3), [1, 2, 3]) testing.assert_list_equal('123', '123') def test_different_lengths(self): with pytest.raises(AssertionError): testing.assert_list_equal([], [1])",
"pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, [],",
"1, 1, 5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', dt.date(2018, 1,",
"'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'postgresql':",
"dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) class TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls =",
"05:30:00.000000'\"), ('due_date', 'eq', '2018-01-01', \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE",
"self.assert_matches( ['Foo', 'Bar'], [ [1, 2], [2, 3], [3, 4] ] ) def",
"with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers(['Foo']) self.assert_matches(['Foo'],",
"xls_rows): self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows) def test_empty_xls(self): with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'', None, None) with",
"self.headers_written: row_offset = 1 for row_index, row in enumerate(values, start=row_offset): for col_index, value",
"def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None] ]) # the left `None`",
"'Bar']]) self.assert_matches([['Foo', 'Bar']], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2, 3]])",
"1.23, 'hello', None, True, False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello',",
"1, 1.23, 'hello', None] ]) # the right `None` gets dropped self.assert_matches([], [",
"\"WHERE sabwp_radios.make = 'foo'\"), ('model', 'eq', 'foo', \"WHERE sabwp_radios.model = 'foo'\"), ('year', 'eq',",
"2], (x for x in range(3))) class TestAssertRenderedXlsMatches: def setup(self): self.workbook = xlwt.Workbook()",
"assert self.headers_written is None self.set_values(headers) self.headers_written = len(headers) def set_values(self, values): row_offset =",
"True, False] ]) self.assert_matches([], [ [1, 1.23, 'hello', '', True, False] ]) def",
"persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"),",
"range(3))) testing.assert_list_equal((x for x in range(3)), [0, 1, 2]) testing.assert_list_equal([0, 1, 2], (x",
"test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers(['Foo']) self.assert_matches(['Foo'], []) def test_multiple_headers(self): self.set_headers(['Foo', 'Bar']) self.assert_matches(['Foo',",
"self.headers_written = len(headers) def set_values(self, values): row_offset = 0 if self.headers_written: row_offset =",
"self.set_values([ [1, 1.23, 'hello', None, True, False] ]) self.assert_matches([], [ [1, 1.23, 'hello',",
"is required.*'): self.assert_matches([], []) def set_headers(self, headers): assert self.headers_written is None self.set_values(headers) self.headers_written",
"None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, [], []) def",
"def test_headers_and_rows(self): self.set_headers([ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ]) self.set_values([ [1, 2], [2, 3],",
"Time'), )) self.expect_table_contents((('01/01/2018 05:30 AM', '05/31/2019', '01:30 AM'), )) class TestGridBasePG(testing.GridBase): grid_cls =",
"range(3)), [0, 1, 2]) testing.assert_list_equal([0, 1, 2], (x for x in range(3))) class",
"testing.assert_list_equal([0, 1, 2], (x for x in range(3))) class TestAssertRenderedXlsMatches: def setup(self): self.workbook",
"pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], [])",
"test_empty_xlsx(self): with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, None, None) with pytest.raises(AssertionError):",
"'Start Time'), )) self.expect_table_contents((('01/01/2018 05:30 AM', '05/31/2019', '01:30 AM'), )) class TestGridBasePG(testing.GridBase): grid_cls",
"None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, [], []) def",
"4] ]) def test_headers_and_rows(self): self.set_headers(['Foo', 'Bar']) self.set_values([ [1, 2], [2, 3], [3, 4]",
"for index, header in enumerate(headers): self.sheet.write(0, index, header) self.headers_written = True def set_values(self,",
"testing.assert_list_equal([1, 2, 3], [2, 3, 1]) def test_generators(self): testing.assert_list_equal((x for x in range(3)),",
"[]) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers([['Foo']]) self.assert_matches([['Foo']], []) def test_multiple_headers(self): self.set_headers([['Foo',",
"None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers([['Foo']])",
"import datetime as dt from io import BytesIO from unittest import mock import",
"the left `None` becomes an empty string # the right `None` gets dropped",
"self.sheet.write(row_index, col_index, value) def assert_matches(self, xls_headers, xls_rows): self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows) def test_empty_xls(self):",
"from unittest import mock import pytest import xlsxwriter import xlwt from webgrid import",
"None] ]) # the left `None` becomes an empty string # the right",
"!= 'sqlite': pytest.skip('sqlite-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1,",
"'hello'] ]) class TestAssertRenderedXlsxMatches: def setup(self): self.stream = BytesIO() self.workbook = xlsxwriter.Workbook(self.stream, options={'in_memory':",
"row_offset = 1 for row_index, row in enumerate(values, start=row_offset): for col_index, value in",
"if db.engine.dialect.name != 'sqlite': pytest.skip('sqlite-only test') @property def filters(self): return ( ('createdts', 'eq',",
"]) self.assert_matches([], [ [1, 2, 3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers([",
"from webgrid_ta.model.entities import Person, db def setup_module(): import flask assert not flask.request class",
"self.assert_matches( [ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ], [ [1, 2], [2, 3], [3,",
"'hello', None, True, False] ]) self.assert_matches([], [ [1, 1.23, 'hello', '', True, False]",
"1.23, 'hello'] ]) class TestAssertRenderedXlsxMatches: def setup(self): self.stream = BytesIO() self.workbook = xlsxwriter.Workbook(self.stream,",
"test_order_is_significant(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [2, 3, 1]) def test_generators(self): testing.assert_list_equal((x for",
"= 1945\"), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'mssql': pytest.skip('sql server-only test')",
"filters(self): return ( ('make', 'eq', 'foo', \"WHERE sabwp_radios.make = 'foo'\"), ('model', 'eq', 'foo',",
"= len(headers) def set_values(self, values): row_offset = 0 if self.headers_written: row_offset = self.headers_written",
"the right `None` gets dropped self.assert_matches([], [ [None, 1, 1.23, 'hello'] ]) class",
"match=r'openpyxl is required.*'): self.assert_matches([], []) def set_headers(self, headers): assert self.headers_written is None self.set_values(headers)",
"('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) class TestGridBaseMSSQLStrings(testing.MSSQLGridBase):",
"'hello', None] ]) # the right `None` gets dropped self.assert_matches([], [ [None, 1,",
"[1, 2, 3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers([ ['Foo', 'Bar'], ['Snoopy',",
"30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) class TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls = RadioGrid",
"test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None] ]) # the left `None` becomes",
"'eq', 'foo', \"WHERE sabwp_radios.model = 'foo'\"), ('year', 'eq', '1945', \"WHERE sabwp_radios.year = 1945\"),",
"filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1, 1, 5, 30), \"WHERE persons.createdts BETWEEN",
"self.workbook = xlsxwriter.Workbook(self.stream, options={'in_memory': True}) self.sheet = self.workbook.add_worksheet('sheet1') self.headers_written = None def test_openpyxl_requirement(self):",
"[ [None, 1, 1.23, 'hello'] ]) class TestGridBase(testing.GridBase): grid_cls = TemporalGrid sort_tests =",
"['', 1, 1.23, 'hello'] ]) class TestAssertRenderedXlsxMatches: def setup(self): self.stream = BytesIO() self.workbook",
"testing.assert_rendered_xlsx_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def",
"is None self.set_values(headers) self.headers_written = len(headers) def set_values(self, values): row_offset = 0 if",
"= self.workbook.add_worksheet('sheet1') self.headers_written = None def test_openpyxl_requirement(self): with mock.patch('webgrid.testing.openpyxl', None): with pytest.raises(Exception, match=r'openpyxl",
"<reponame>sourcery-ai-bot/webgrid<filename>webgrid/tests/test_testing.py import datetime as dt from io import BytesIO from unittest import mock",
"test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers([['Foo']]) self.assert_matches([['Foo']], []) def test_multiple_headers(self): self.set_headers([['Foo', 'Bar']]) self.assert_matches([['Foo',",
"grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), )",
"'Bar'], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2, 3]]) def test_multiple_rows(self):",
"2, 3]) testing.assert_list_equal((1, 2, 3), [1, 2, 3]) testing.assert_list_equal('123', '123') def test_different_lengths(self): with",
"]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None] ]) # the right",
"if db.engine.dialect.name != 'mssql': pytest.skip('sql server-only test') @property def filters(self): return ( ('createdts',",
"for x in range(3))) class TestAssertRenderedXlsMatches: def setup(self): self.workbook = xlwt.Workbook() self.sheet =",
"self.headers_written = False def set_headers(self, headers): for index, header in enumerate(headers): self.sheet.write(0, index,",
"[]) def test_different_elements(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [1, 2, 4]) def test_order_is_significant(self):",
"= 1 for row_index, row in enumerate(values, start=row_offset): for col_index, value in enumerate(row):",
"the right `None` gets dropped self.assert_matches([], [ ['', 1, 1.23, 'hello'] ]) class",
"persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME",
"becomes an empty string # the right `None` gets dropped self.assert_matches([], [ ['',",
"self.stream = BytesIO() self.workbook = xlsxwriter.Workbook(self.stream, options={'in_memory': True}) self.sheet = self.workbook.add_worksheet('sheet1') self.headers_written =",
"self.workbook.add_sheet('sheet1') self.stream = BytesIO() self.headers_written = False def set_headers(self, headers): for index, header",
"2, 3]) testing.assert_list_equal('123', '123') def test_different_lengths(self): with pytest.raises(AssertionError): testing.assert_list_equal([], [1]) with pytest.raises(AssertionError): testing.assert_list_equal([1],",
"self.expect_table_contents((('01/01/2018 05:30 AM', '05/31/2019', '01:30 AM'), )) class TestGridBasePG(testing.GridBase): grid_cls = TemporalGrid sort_tests",
"flask assert not flask.request class TestAssertListEqual: \"\"\"Verify the `assert_list_equal` method performs as expected\"\"\"",
"with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [1, 2, 4]) def test_order_is_significant(self): with pytest.raises(AssertionError): testing.assert_list_equal([1,",
"[2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers(['Foo', 'Bar']) self.set_values([ [1, 2], [2, 3],",
"2], [2, 3], [3, 4] ]) self.assert_matches( [ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ],",
"persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', dt.date(2018, 1, 1), \"WHERE persons.due_date = '2018-01-01'\"),",
"1 for row_index, row in enumerate(values, start=row_offset): for col_index, value in enumerate(row): self.sheet.write(row_index,",
"self.workbook.add_worksheet('sheet1') self.headers_written = None def test_openpyxl_requirement(self): with mock.patch('webgrid.testing.openpyxl', None): with pytest.raises(Exception, match=r'openpyxl is",
"testing from webgrid_ta.grids import RadioGrid, TemporalGrid from webgrid_ta.model.entities import Person, db def setup_module():",
"def set_values(self, values): row_offset = 0 if self.headers_written: row_offset = self.headers_written for row_index,",
"], [ [1, 2], [2, 3], [3, 4] ] ) def test_value_types(self): self.set_values([",
"`None` becomes an empty string # the right `None` gets dropped self.assert_matches([], [",
"def setup(self): self.workbook = xlwt.Workbook() self.sheet = self.workbook.add_sheet('sheet1') self.stream = BytesIO() self.headers_written =",
"self.headers_written is None self.set_values(headers) self.headers_written = len(headers) def set_values(self, values): row_offset = 0",
"for x in range(3))) testing.assert_list_equal((x for x in range(3)), [0, 1, 2]) testing.assert_list_equal([0,",
"TIME ZONE)\"), ) class TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'),",
"1, 1, 5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', '2018-01-01', \"WHERE",
"TIME)\"), ) class TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls = RadioGrid @property def filters(self): return ( ('make',",
"= self.headers_written for row_index, row in enumerate(values, start=row_offset): for col_index, value in enumerate(row):",
"['Foo', 'Bar'], ['Snoopy', 'Dog'], ]) self.set_values([ [1, 2], [2, 3], [3, 4] ])",
"30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) def setup_method(self, _): Person.delete_cascaded() Person.testing_create(",
"def setup_class(cls): if db.engine.dialect.name != 'mssql': pytest.skip('sql server-only test') @property def filters(self): return",
"\"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', '2018-01-01', \"WHERE persons.due_date = '2018-01-01'\"), ('start_time',",
"= xlwt.Workbook() self.sheet = self.workbook.add_sheet('sheet1') self.stream = BytesIO() self.headers_written = False def set_headers(self,",
"'Bar'], [ [1, 2], [2, 3], [3, 4] ] ) def test_value_types(self): self.set_values([",
"self.set_values([ [1, 2], [2, 3], [3, 4] ]) self.assert_matches( ['Foo', 'Bar'], [ [1,",
"row_index, row in enumerate(values, start=row_offset): for col_index, value in enumerate(row): self.sheet.write(row_index, col_index, value)",
"_): Person.delete_cascaded() Person.testing_create( createdts=dt.datetime(2018, 1, 1, 5, 30), due_date=dt.date(2019, 5, 31), start_time=dt.time(1, 30),",
"test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2, 3]]) def test_multiple_rows(self): self.set_values([ [1, 2,",
"server-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1, 1, 5,",
"setup_class(cls): if db.engine.dialect.name != 'mssql': pytest.skip('sql server-only test') @property def filters(self): return (",
"io import BytesIO from unittest import mock import pytest import xlsxwriter import xlwt",
"'', True, False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None] ])",
"TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'),",
"if self.headers_written: row_offset = 1 for row_index, row in enumerate(values, start=row_offset): for col_index,",
"3], [2, 3, 4] ]) self.assert_matches([], [ [1, 2, 3], [2, 3, 4]",
"('due_date', 'eq', dt.date(2018, 1, 1), \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'),",
"'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'sqlite': pytest.skip('sqlite-only test') @property def",
"( ('createdts', 'eq', dt.datetime(2018, 1, 1, 5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"),",
"3], [2, 3, 1]) def test_generators(self): testing.assert_list_equal((x for x in range(3)), (x for",
"[]) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2, 3]]) def test_multiple_rows(self): self.set_values([",
"def test_single_header(self): self.set_headers(['Foo']) self.assert_matches(['Foo'], []) def test_multiple_headers(self): self.set_headers(['Foo', 'Bar']) self.assert_matches(['Foo', 'Bar'], []) def",
"'hello'] ]) class TestGridBase(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date',",
") def setup_method(self, _): Person.delete_cascaded() Person.testing_create( createdts=dt.datetime(2018, 1, 1, 5, 30), due_date=dt.date(2019, 5,",
"3]) testing.assert_list_equal((1, 2, 3), [1, 2, 3]) testing.assert_list_equal('123', '123') def test_different_lengths(self): with pytest.raises(AssertionError):",
"(x for x in range(3))) testing.assert_list_equal((x for x in range(3)), [0, 1, 2])",
"def test_generators(self): testing.assert_list_equal((x for x in range(3)), (x for x in range(3))) testing.assert_list_equal((x",
"BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', '2018-01-01', \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1,",
"TemporalGrid from webgrid_ta.model.entities import Person, db def setup_module(): import flask assert not flask.request",
"pytest.raises(AssertionError): testing.assert_list_equal([], [1]) with pytest.raises(AssertionError): testing.assert_list_equal([1], []) def test_different_elements(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2,",
"[None, 1, 1.23, 'hello', None] ]) # the right `None` gets dropped self.assert_matches([],",
"pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers(['Foo']) self.assert_matches(['Foo'], [])",
") @classmethod def setup_class(cls): if db.engine.dialect.name != 'sqlite': pytest.skip('sqlite-only test') @property def filters(self):",
"1.23, 'hello', None] ]) # the left `None` becomes an empty string #",
"return ( ('createdts', 'eq', dt.datetime(2018, 1, 1, 5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01",
"'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'mssql': pytest.skip('sql server-only test') @property",
"test_multiple_rows(self): self.set_values([ [1, 2, 3], [2, 3, 4] ]) self.assert_matches([], [ [1, 2,",
"self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows) def test_empty_xls(self): with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'', None, None) with pytest.raises(AssertionError):",
"'2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', dt.date(2018, 1, 1), \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq',",
"30), ) def test_expected_rows(self): self.expect_table_header((('Created', 'Due Date', 'Start Time'), )) self.expect_table_contents((('01/01/2018 05:30 AM',",
"assert_matches(self, xlsx_headers, xlsx_rows): self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows) def test_empty_xlsx(self): with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'', None,",
"self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows) def test_empty_xlsx(self): with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'', None, None) with pytest.raises(AssertionError):",
"class TestGridBase(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time',",
"index, header in enumerate(headers): self.sheet.write(0, index, header) self.headers_written = True def set_values(self, values):",
"def assert_matches(self, xls_headers, xls_rows): self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows) def test_empty_xls(self): with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'',",
"class TestGridBasePG(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time',",
"None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers(['Foo'])",
"[ [1, 1.23, 'hello', '', True, False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1,",
"AM', '05/31/2019', '01:30 AM'), )) class TestGridBasePG(testing.GridBase): grid_cls = TemporalGrid sort_tests = (",
"= 'foo'\"), ('model', 'eq', 'foo', \"WHERE sabwp_radios.model = 'foo'\"), ('year', 'eq', '1945', \"WHERE",
"right `None` gets dropped self.assert_matches([], [ [None, 1, 1.23, 'hello'] ]) class TestGridBase(testing.GridBase):",
"empty string # the right `None` gets dropped self.assert_matches([], [ ['', 1, 1.23,",
"3]]) self.assert_matches([], [[1, 2, 3]]) def test_multiple_rows(self): self.set_values([ [1, 2, 3], [2, 3,",
"from io import BytesIO from unittest import mock import pytest import xlsxwriter import",
"if db.engine.dialect.name != 'postgresql': pytest.skip('postgres-only test') @property def filters(self): return ( ('createdts', 'eq',",
"due_date=dt.date(2019, 5, 31), start_time=dt.time(1, 30), ) def test_expected_rows(self): self.expect_table_header((('Created', 'Due Date', 'Start Time'),",
"in enumerate(headers): self.sheet.write(0, index, header) self.headers_written = True def set_values(self, values): row_offset =",
"setup_method(self, _): Person.delete_cascaded() Person.testing_create( createdts=dt.datetime(2018, 1, 1, 5, 30), due_date=dt.date(2019, 5, 31), start_time=dt.time(1,",
"def test_order_is_significant(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [2, 3, 1]) def test_generators(self): testing.assert_list_equal((x",
"('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'mssql': pytest.skip('sql server-only test')",
"dt.date(2018, 1, 1), \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time",
"mock.patch('webgrid.testing.openpyxl', None): with pytest.raises(Exception, match=r'openpyxl is required.*'): self.assert_matches([], []) def set_headers(self, headers): assert",
"]) def test_headers_and_rows(self): self.set_headers(['Foo', 'Bar']) self.set_values([ [1, 2], [2, 3], [3, 4] ])",
"'1945', \"WHERE sabwp_radios.year = 1945\"), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'mssql':",
") class TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls = RadioGrid @property def filters(self): return ( ('make', 'eq',",
"assert not flask.request class TestAssertListEqual: \"\"\"Verify the `assert_list_equal` method performs as expected\"\"\" def",
"string # the right `None` gets dropped self.assert_matches([], [ ['', 1, 1.23, 'hello']",
"def test_single_header(self): self.set_headers([['Foo']]) self.assert_matches([['Foo']], []) def test_multiple_headers(self): self.set_headers([['Foo', 'Bar']]) self.assert_matches([['Foo', 'Bar']], []) def",
"import pytest import xlsxwriter import xlwt from webgrid import testing from webgrid_ta.grids import",
"setup_module(): import flask assert not flask.request class TestAssertListEqual: \"\"\"Verify the `assert_list_equal` method performs",
"05:30 AM', '05/31/2019', '01:30 AM'), )) class TestGridBasePG(testing.GridBase): grid_cls = TemporalGrid sort_tests =",
"self.sheet = self.workbook.add_sheet('sheet1') self.stream = BytesIO() self.headers_written = False def set_headers(self, headers): for",
"required.*'): self.assert_matches([], []) def set_headers(self, headers): assert self.headers_written is None self.set_values(headers) self.headers_written =",
"def filters(self): return ( ('make', 'eq', 'foo', \"WHERE sabwp_radios.make = 'foo'\"), ('model', 'eq',",
"'2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME WITHOUT TIME",
"BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) class TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls = RadioGrid @property def filters(self):",
"def test_simple_equivalents(self): testing.assert_list_equal([], []) testing.assert_list_equal([1, 2, 3], [1, 2, 3]) testing.assert_list_equal((1, 2, 3),",
"grid_cls = RadioGrid @property def filters(self): return ( ('make', 'eq', 'foo', \"WHERE sabwp_radios.make",
"header in enumerate(headers): self.sheet.write(0, index, header) self.headers_written = True def set_values(self, values): row_offset",
"None] ]) # the right `None` gets dropped self.assert_matches([], [ [None, 1, 1.23,",
"[1]) with pytest.raises(AssertionError): testing.assert_list_equal([1], []) def test_different_elements(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [1,",
"self.sheet.write(0, index, header) self.headers_written = True def set_values(self, values): row_offset = 0 if",
"['Foo', 'Bar'], [ [1, 2], [2, 3], [3, 4] ] ) def test_value_types(self):",
"2, 3], [2, 3, 4] ]) self.assert_matches([], [ [1, 2, 3], [2, 3,",
"with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers([['Foo']]) self.assert_matches([['Foo']],",
"def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1, 1, 5, 30), \"WHERE persons.createdts",
"method performs as expected\"\"\" def test_simple_equivalents(self): testing.assert_list_equal([], []) testing.assert_list_equal([1, 2, 3], [1, 2,",
"= None def test_openpyxl_requirement(self): with mock.patch('webgrid.testing.openpyxl', None): with pytest.raises(Exception, match=r'openpyxl is required.*'): self.assert_matches([],",
"5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', dt.date(2018, 1, 1), \"WHERE",
"def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2, 3]]) def test_multiple_rows(self): self.set_values([ [1,",
"\"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) def setup_method(self, _): Person.delete_cascaded() Person.testing_create( createdts=dt.datetime(2018,",
"value) def assert_matches(self, xlsx_headers, xlsx_rows): self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows) def test_empty_xlsx(self): with pytest.raises(AssertionError):",
") def test_expected_rows(self): self.expect_table_header((('Created', 'Due Date', 'Start Time'), )) self.expect_table_contents((('01/01/2018 05:30 AM', '05/31/2019',",
"class TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time',",
"[3, 4] ]) self.assert_matches( ['Foo', 'Bar'], [ [1, 2], [2, 3], [3, 4]",
"1, 1), \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN",
"def setup_method(self, _): Person.delete_cascaded() Person.testing_create( createdts=dt.datetime(2018, 1, 1, 5, 30), due_date=dt.date(2019, 5, 31),",
"'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'postgresql': pytest.skip('postgres-only test')",
"in range(3))) testing.assert_list_equal((x for x in range(3)), [0, 1, 2]) testing.assert_list_equal([0, 1, 2],",
"setup(self): self.workbook = xlwt.Workbook() self.sheet = self.workbook.add_sheet('sheet1') self.stream = BytesIO() self.headers_written = False",
"True def set_values(self, values): row_offset = 0 if self.headers_written: row_offset = 1 for",
"`assert_list_equal` method performs as expected\"\"\" def test_simple_equivalents(self): testing.assert_list_equal([], []) testing.assert_list_equal([1, 2, 3], [1,",
"'foo'\"), ('year', 'eq', '1945', \"WHERE sabwp_radios.year = 1945\"), ) @classmethod def setup_class(cls): if",
"datetime as dt from io import BytesIO from unittest import mock import pytest",
"as dt from io import BytesIO from unittest import mock import pytest import",
"start=row_offset): for col_index, value in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xlsx_headers, xlsx_rows):",
"testing.assert_rendered_xls_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def",
"col_index, value in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xlsx_headers, xlsx_rows): self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook,",
") @classmethod def setup_class(cls): if db.engine.dialect.name != 'mssql': pytest.skip('sql server-only test') @property def",
"def test_empty_xls(self): with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, None, None) with",
"[[1, 2, 3]]) def test_multiple_rows(self): self.set_values([ [1, 2, 3], [2, 3, 4] ])",
"AS TIME)\"), ) class TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls = RadioGrid @property def filters(self): return (",
"[2, 3, 1]) def test_generators(self): testing.assert_list_equal((x for x in range(3)), (x for x",
"None def test_openpyxl_requirement(self): with mock.patch('webgrid.testing.openpyxl', None): with pytest.raises(Exception, match=r'openpyxl is required.*'): self.assert_matches([], [])",
"values): row_offset = 0 if self.headers_written: row_offset = self.headers_written for row_index, row in",
"[1, 2, 3], [2, 3, 4] ]) self.assert_matches([], [ [1, 2, 3], [2,",
"[1, 2], [2, 3], [3, 4] ] ) def test_value_types(self): self.set_values([ [1, 1.23,",
"30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME WITHOUT TIME ZONE)\"), ) class TestGridBaseMSSQLDates(testing.MSSQLGridBase):",
"= True def set_values(self, values): row_offset = 0 if self.headers_written: row_offset = 1",
"def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers([['Foo']]) self.assert_matches([['Foo']], []) def test_multiple_headers(self): self.set_headers([['Foo', 'Bar']])",
"def test_multiple_rows(self): self.set_values([ [1, 2, 3], [2, 3, 4] ]) self.assert_matches([], [ [1,",
"testing.assert_rendered_xlsx_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, [], [])",
"self.assert_matches([], []) def set_headers(self, headers): assert self.headers_written is None self.set_values(headers) self.headers_written = len(headers)",
"def setup(self): self.stream = BytesIO() self.workbook = xlsxwriter.Workbook(self.stream, options={'in_memory': True}) self.sheet = self.workbook.add_worksheet('sheet1')",
"'Bar'], ['Snoopy', 'Dog'], ], [ [1, 2], [2, 3], [3, 4] ] )",
"return ( ('make', 'eq', 'foo', \"WHERE sabwp_radios.make = 'foo'\"), ('model', 'eq', 'foo', \"WHERE",
"pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, [],",
"sabwp_radios.year = 1945\"), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'mssql': pytest.skip('sql server-only",
"1, 1.23, 'hello'] ]) class TestAssertRenderedXlsxMatches: def setup(self): self.stream = BytesIO() self.workbook =",
"3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers(['Foo', 'Bar']) self.set_values([ [1, 2], [2,",
"pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [2, 3, 1]) def test_generators(self): testing.assert_list_equal((x for x in",
"row_offset = 0 if self.headers_written: row_offset = self.headers_written for row_index, row in enumerate(values,",
"test_different_elements(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [1, 2, 4]) def test_order_is_significant(self): with pytest.raises(AssertionError):",
"testing.assert_rendered_xlsx_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers([['Foo']]) self.assert_matches([['Foo']], []) def",
"Person.testing_create( createdts=dt.datetime(2018, 1, 1, 5, 30), due_date=dt.date(2019, 5, 31), start_time=dt.time(1, 30), ) def",
"dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME WITHOUT TIME ZONE)\"), ) class",
"def assert_matches(self, xlsx_headers, xlsx_rows): self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows) def test_empty_xlsx(self): with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'',",
"the `assert_list_equal` method performs as expected\"\"\" def test_simple_equivalents(self): testing.assert_list_equal([], []) testing.assert_list_equal([1, 2, 3],",
"xls_rows) def test_empty_xls(self): with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, None, None)",
"test_different_lengths(self): with pytest.raises(AssertionError): testing.assert_list_equal([], [1]) with pytest.raises(AssertionError): testing.assert_list_equal([1], []) def test_different_elements(self): with pytest.raises(AssertionError):",
"self.assert_matches([], [ [1, 2, 3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers(['Foo', 'Bar'])",
"xlsxwriter import xlwt from webgrid import testing from webgrid_ta.grids import RadioGrid, TemporalGrid from",
"createdts=dt.datetime(2018, 1, 1, 5, 30), due_date=dt.date(2019, 5, 31), start_time=dt.time(1, 30), ) def test_expected_rows(self):",
"from webgrid import testing from webgrid_ta.grids import RadioGrid, TemporalGrid from webgrid_ta.model.entities import Person,",
"enumerate(values, start=row_offset): for col_index, value in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xls_headers,",
"dropped self.assert_matches([], [ [None, 1, 1.23, 'hello'] ]) class TestGridBase(testing.GridBase): grid_cls = TemporalGrid",
"def set_headers(self, headers): assert self.headers_written is None self.set_values(headers) self.headers_written = len(headers) def set_values(self,",
"for x in range(3)), (x for x in range(3))) testing.assert_list_equal((x for x in",
"test_openpyxl_requirement(self): with mock.patch('webgrid.testing.openpyxl', None): with pytest.raises(Exception, match=r'openpyxl is required.*'): self.assert_matches([], []) def set_headers(self,",
"4] ]) self.assert_matches( ['Foo', 'Bar'], [ [1, 2], [2, 3], [3, 4] ]",
"import xlsxwriter import xlwt from webgrid import testing from webgrid_ta.grids import RadioGrid, TemporalGrid",
"pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [1, 2, 4]) def test_order_is_significant(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2,",
"2, 3], [1, 2, 3]) testing.assert_list_equal((1, 2, 3), [1, 2, 3]) testing.assert_list_equal('123', '123')",
"col_index, value) def assert_matches(self, xlsx_headers, xlsx_rows): self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows) def test_empty_xlsx(self): with",
"0 if self.headers_written: row_offset = 1 for row_index, row in enumerate(values, start=row_offset): for",
"[]) def test_single_header(self): self.set_headers([['Foo']]) self.assert_matches([['Foo']], []) def test_multiple_headers(self): self.set_headers([['Foo', 'Bar']]) self.assert_matches([['Foo', 'Bar']], [])",
"'eq', dt.date(2018, 1, 1), \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE",
"'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) class TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls",
"set_values(self, values): row_offset = 0 if self.headers_written: row_offset = self.headers_written for row_index, row",
"1, 5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', dt.date(2018, 1, 1),",
"\"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS",
"2, 3], [1, 2, 4]) def test_order_is_significant(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [2,",
"ZONE)\"), ) class TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date',",
"in range(3))) class TestAssertRenderedXlsMatches: def setup(self): self.workbook = xlwt.Workbook() self.sheet = self.workbook.add_sheet('sheet1') self.stream",
"'foo', \"WHERE sabwp_radios.model = 'foo'\"), ('year', 'eq', '1945', \"WHERE sabwp_radios.year = 1945\"), )",
"self.sheet.write(row_index, col_index, value) def assert_matches(self, xlsx_headers, xlsx_rows): self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows) def test_empty_xlsx(self):",
"self.headers_written for row_index, row in enumerate(values, start=row_offset): for col_index, value in enumerate(row): self.sheet.write(row_index,",
"TestGridBasePG(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'),",
"BytesIO() self.headers_written = False def set_headers(self, headers): for index, header in enumerate(headers): self.sheet.write(0,",
"'eq', dt.datetime(2018, 1, 1, 5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq',",
"= False def set_headers(self, headers): for index, header in enumerate(headers): self.sheet.write(0, index, header)",
"!= 'postgresql': pytest.skip('postgres-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1,",
"= self.workbook.add_sheet('sheet1') self.stream = BytesIO() self.headers_written = False def set_headers(self, headers): for index,",
"('make', 'eq', 'foo', \"WHERE sabwp_radios.make = 'foo'\"), ('model', 'eq', 'foo', \"WHERE sabwp_radios.model =",
"self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2, 3]]) def test_multiple_rows(self): self.set_values([ [1, 2, 3],",
"3, 1]) def test_generators(self): testing.assert_list_equal((x for x in range(3)), (x for x in",
"test_single_header(self): self.set_headers([['Foo']]) self.assert_matches([['Foo']], []) def test_multiple_headers(self): self.set_headers([['Foo', 'Bar']]) self.assert_matches([['Foo', 'Bar']], []) def test_single_row(self):",
"AS TIME WITHOUT TIME ZONE)\"), ) class TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls = TemporalGrid sort_tests =",
"3], [1, 2, 4]) def test_order_is_significant(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [2, 3,",
"value) def assert_matches(self, xls_headers, xls_rows): self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows) def test_empty_xls(self): with pytest.raises(AssertionError):",
"sabwp_radios.make = 'foo'\"), ('model', 'eq', 'foo', \"WHERE sabwp_radios.model = 'foo'\"), ('year', 'eq', '1945',",
"None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, [], []) def test_blank_workbook(self):",
"[1, 1.23, 'hello', '', True, False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23,",
"class TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls = RadioGrid @property def filters(self): return ( ('make', 'eq', 'foo',",
"\"WHERE sabwp_radios.year = 1945\"), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'mssql': pytest.skip('sql",
"as expected\"\"\" def test_simple_equivalents(self): testing.assert_list_equal([], []) testing.assert_list_equal([1, 2, 3], [1, 2, 3]) testing.assert_list_equal((1,",
"self.sheet = self.workbook.add_worksheet('sheet1') self.headers_written = None def test_openpyxl_requirement(self): with mock.patch('webgrid.testing.openpyxl', None): with pytest.raises(Exception,",
"False def set_headers(self, headers): for index, header in enumerate(headers): self.sheet.write(0, index, header) self.headers_written",
"import BytesIO from unittest import mock import pytest import xlsxwriter import xlwt from",
"self.set_headers(['Foo']) self.assert_matches(['Foo'], []) def test_multiple_headers(self): self.set_headers(['Foo', 'Bar']) self.assert_matches(['Foo', 'Bar'], []) def test_single_row(self): self.set_values([[1,",
"webgrid import testing from webgrid_ta.grids import RadioGrid, TemporalGrid from webgrid_ta.model.entities import Person, db",
"import mock import pytest import xlsxwriter import xlwt from webgrid import testing from",
"self.set_values(headers) self.headers_written = len(headers) def set_values(self, values): row_offset = 0 if self.headers_written: row_offset",
"for col_index, value in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xlsx_headers, xlsx_rows): self.workbook.close()",
"testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows) def test_empty_xls(self): with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None,",
"]) self.assert_matches( [ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ], [ [1, 2], [2, 3],",
"not flask.request class TestAssertListEqual: \"\"\"Verify the `assert_list_equal` method performs as expected\"\"\" def test_simple_equivalents(self):",
"@classmethod def setup_class(cls): if db.engine.dialect.name != 'postgresql': pytest.skip('postgres-only test') @property def filters(self): return",
"def set_values(self, values): row_offset = 0 if self.headers_written: row_offset = 1 for row_index,",
"set_headers(self, headers): assert self.headers_written is None self.set_values(headers) self.headers_written = len(headers) def set_values(self, values):",
"# the right `None` gets dropped self.assert_matches([], [ ['', 1, 1.23, 'hello'] ])",
"None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self):",
"xls_headers, xls_rows): self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows) def test_empty_xls(self): with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(b'', None, None)",
"right `None` gets dropped self.assert_matches([], [ ['', 1, 1.23, 'hello'] ]) class TestAssertRenderedXlsxMatches:",
"options={'in_memory': True}) self.sheet = self.workbook.add_worksheet('sheet1') self.headers_written = None def test_openpyxl_requirement(self): with mock.patch('webgrid.testing.openpyxl', None):",
"'Dog'], ], [ [1, 2], [2, 3], [3, 4] ] ) def test_value_types(self):",
"[]) testing.assert_list_equal([1, 2, 3], [1, 2, 3]) testing.assert_list_equal((1, 2, 3), [1, 2, 3])",
"xlsx_headers, xlsx_rows) def test_empty_xlsx(self): with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, None,",
"('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME WITHOUT TIME ZONE)\"),",
"persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', '2018-01-01', \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq',",
"BytesIO from unittest import mock import pytest import xlsxwriter import xlwt from webgrid",
"in range(3)), [0, 1, 2]) testing.assert_list_equal([0, 1, 2], (x for x in range(3)))",
"pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], [])",
"[None, 1, 1.23, 'hello', None] ]) # the left `None` becomes an empty",
"len(headers) def set_values(self, values): row_offset = 0 if self.headers_written: row_offset = self.headers_written for",
"in enumerate(values, start=row_offset): for col_index, value in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self,",
"# the right `None` gets dropped self.assert_matches([], [ [None, 1, 1.23, 'hello'] ])",
"'Bar']], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2, 3]]) def test_multiple_rows(self):",
"def set_headers(self, headers): for index, header in enumerate(headers): self.sheet.write(0, index, header) self.headers_written =",
"def test_value_types(self): self.set_values([ [1, 1.23, 'hello', None, True, False] ]) self.assert_matches([], [ [1,",
"setup_class(cls): if db.engine.dialect.name != 'sqlite': pytest.skip('sqlite-only test') @property def filters(self): return ( ('createdts',",
"]) self.assert_matches([], [ [1, 2, 3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers(['Foo',",
"def test_headers_and_rows(self): self.set_headers(['Foo', 'Bar']) self.set_values([ [1, 2], [2, 3], [3, 4] ]) self.assert_matches(",
"= 0 if self.headers_written: row_offset = 1 for row_index, row in enumerate(values, start=row_offset):",
"def setup_class(cls): if db.engine.dialect.name != 'sqlite': pytest.skip('sqlite-only test') @property def filters(self): return (",
"3]) testing.assert_list_equal('123', '123') def test_different_lengths(self): with pytest.raises(AssertionError): testing.assert_list_equal([], [1]) with pytest.raises(AssertionError): testing.assert_list_equal([1], [])",
"BytesIO() self.workbook = xlsxwriter.Workbook(self.stream, options={'in_memory': True}) self.sheet = self.workbook.add_worksheet('sheet1') self.headers_written = None def",
") class TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'),",
"5, 30), due_date=dt.date(2019, 5, 31), start_time=dt.time(1, 30), ) def test_expected_rows(self): self.expect_table_header((('Created', 'Due Date',",
"\"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) class TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls = RadioGrid @property",
"[1, 1.23, 'hello', None, True, False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23,",
"start=row_offset): for col_index, value in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xls_headers, xls_rows):",
"self.assert_matches([['Foo']], []) def test_multiple_headers(self): self.set_headers([['Foo', 'Bar']]) self.assert_matches([['Foo', 'Bar']], []) def test_single_row(self): self.set_values([[1, 2,",
"sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls):",
"'eq', 'foo', \"WHERE sabwp_radios.make = 'foo'\"), ('model', 'eq', 'foo', \"WHERE sabwp_radios.model = 'foo'\"),",
") @classmethod def setup_class(cls): if db.engine.dialect.name != 'postgresql': pytest.skip('postgres-only test') @property def filters(self):",
"self.assert_matches([], []) def test_single_header(self): self.set_headers([['Foo']]) self.assert_matches([['Foo']], []) def test_multiple_headers(self): self.set_headers([['Foo', 'Bar']]) self.assert_matches([['Foo', 'Bar']],",
"('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'sqlite': pytest.skip('sqlite-only test') @property",
"'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) def setup_method(self, _):",
"'2018-01-01', \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000'",
"RadioGrid, TemporalGrid from webgrid_ta.model.entities import Person, db def setup_module(): import flask assert not",
"AS TIME)\"), ) def setup_method(self, _): Person.delete_cascaded() Person.testing_create( createdts=dt.datetime(2018, 1, 1, 5, 30),",
"]) class TestAssertRenderedXlsxMatches: def setup(self): self.stream = BytesIO() self.workbook = xlsxwriter.Workbook(self.stream, options={'in_memory': True})",
"1, 5, 30), due_date=dt.date(2019, 5, 31), start_time=dt.time(1, 30), ) def test_expected_rows(self): self.expect_table_header((('Created', 'Due",
"unittest import mock import pytest import xlsxwriter import xlwt from webgrid import testing",
"\"WHERE sabwp_radios.model = 'foo'\"), ('year', 'eq', '1945', \"WHERE sabwp_radios.year = 1945\"), ) @classmethod",
"[], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers([['Foo']]) self.assert_matches([['Foo']], []) def test_multiple_headers(self):",
"None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, [], []) def test_blank_workbook(self):",
"[ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ], [ [1, 2], [2, 3], [3, 4]",
"5, 31), start_time=dt.time(1, 30), ) def test_expected_rows(self): self.expect_table_header((('Created', 'Due Date', 'Start Time'), ))",
"import testing from webgrid_ta.grids import RadioGrid, TemporalGrid from webgrid_ta.model.entities import Person, db def",
"4] ]) def test_headers_and_rows(self): self.set_headers([ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ]) self.set_values([ [1, 2],",
"self.expect_table_header((('Created', 'Due Date', 'Start Time'), )) self.expect_table_contents((('01/01/2018 05:30 AM', '05/31/2019', '01:30 AM'), ))",
"( ('make', 'eq', 'foo', \"WHERE sabwp_radios.make = 'foo'\"), ('model', 'eq', 'foo', \"WHERE sabwp_radios.model",
"1.23, 'hello', None] ]) # the right `None` gets dropped self.assert_matches([], [ [None,",
"'hello', None, True, False] ]) self.assert_matches([], [ [1, 1.23, 'hello', None, True, False]",
"['Snoopy', 'Dog'], ], [ [1, 2], [2, 3], [3, 4] ] ) def",
"]) self.assert_matches([], [ [1, 1.23, 'hello', None, True, False] ]) def test_none_is_mangled(self): self.set_values([",
"'hello', '', True, False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None]",
"= '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME WITHOUT",
"db def setup_module(): import flask assert not flask.request class TestAssertListEqual: \"\"\"Verify the `assert_list_equal`",
"!= 'mssql': pytest.skip('sql server-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018,",
"[ [1, 2], [2, 3], [3, 4] ] ) def test_value_types(self): self.set_values([ [1,",
"AM'), )) class TestGridBasePG(testing.GridBase): grid_cls = TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date',",
"test_none_is_mangled(self): self.set_values([ [None, 1, 1.23, 'hello', None] ]) # the right `None` gets",
"self.set_headers([ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ]) self.set_values([ [1, 2], [2, 3], [3, 4]",
"]) def test_headers_and_rows(self): self.set_headers([ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ]) self.set_values([ [1, 2], [2,",
"[ [1, 1.23, 'hello', None, True, False] ]) def test_none_is_mangled(self): self.set_values([ [None, 1,",
"1), \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000'",
"def test_openpyxl_requirement(self): with mock.patch('webgrid.testing.openpyxl', None): with pytest.raises(Exception, match=r'openpyxl is required.*'): self.assert_matches([], []) def",
"self.assert_matches([['Foo', 'Bar']], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2, 3]]) def",
"2, 3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers(['Foo', 'Bar']) self.set_values([ [1, 2],",
"3, 4] ]) def test_headers_and_rows(self): self.set_headers([ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ]) self.set_values([ [1,",
"db.engine.dialect.name != 'sqlite': pytest.skip('sqlite-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018,",
"import xlwt from webgrid import testing from webgrid_ta.grids import RadioGrid, TemporalGrid from webgrid_ta.model.entities",
"xlsxwriter.Workbook(self.stream, options={'in_memory': True}) self.sheet = self.workbook.add_worksheet('sheet1') self.headers_written = None def test_openpyxl_requirement(self): with mock.patch('webgrid.testing.openpyxl',",
"persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) class TestGridBaseMSSQLStrings(testing.MSSQLGridBase): grid_cls = RadioGrid @property def",
"db.engine.dialect.name != 'mssql': pytest.skip('sql server-only test') @property def filters(self): return ( ('createdts', 'eq',",
"self.set_headers([['Foo', 'Bar']]) self.assert_matches([['Foo', 'Bar']], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1, 2,",
"enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xls_headers, xls_rows): self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows) def",
"BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', dt.date(2018, 1, 1), \"WHERE persons.due_date = '2018-01-01'\"), ('start_time',",
"[1, 2, 4]) def test_order_is_significant(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3], [2, 3, 1])",
"x in range(3)), [0, 1, 2]) testing.assert_list_equal([0, 1, 2], (x for x in",
"= TemporalGrid sort_tests = ( ('createdts', 'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod",
"self.assert_matches([], []) def test_single_header(self): self.set_headers(['Foo']) self.assert_matches(['Foo'], []) def test_multiple_headers(self): self.set_headers(['Foo', 'Bar']) self.assert_matches(['Foo', 'Bar'],",
"CAST('01:30:00.000000' AS TIME)\"), ) def setup_method(self, _): Person.delete_cascaded() Person.testing_create( createdts=dt.datetime(2018, 1, 1, 5,",
"xlsx_rows) def test_empty_xlsx(self): with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, None, None)",
"webgrid_ta.model.entities import Person, db def setup_module(): import flask assert not flask.request class TestAssertListEqual:",
"testing.assert_list_equal((x for x in range(3)), (x for x in range(3))) testing.assert_list_equal((x for x",
"self.assert_matches([], [ ['', 1, 1.23, 'hello'] ]) class TestAssertRenderedXlsxMatches: def setup(self): self.stream =",
"[ [1, 2, 3], [2, 3, 4] ]) def test_headers_and_rows(self): self.set_headers(['Foo', 'Bar']) self.set_values([",
")) self.expect_table_contents((('01/01/2018 05:30 AM', '05/31/2019', '01:30 AM'), )) class TestGridBasePG(testing.GridBase): grid_cls = TemporalGrid",
"test_headers_and_rows(self): self.set_headers([ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ]) self.set_values([ [1, 2], [2, 3], [3,",
"range(3))) class TestAssertRenderedXlsMatches: def setup(self): self.workbook = xlwt.Workbook() self.sheet = self.workbook.add_sheet('sheet1') self.stream =",
"True}) self.sheet = self.workbook.add_worksheet('sheet1') self.headers_written = None def test_openpyxl_requirement(self): with mock.patch('webgrid.testing.openpyxl', None): with",
"headers): assert self.headers_written is None self.set_values(headers) self.headers_written = len(headers) def set_values(self, values): row_offset",
"'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME WITHOUT TIME ZONE)\"), )",
"with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([],",
"30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', '2018-01-01', \"WHERE persons.due_date = '2018-01-01'\"),",
"dt.datetime(2018, 1, 1, 5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', dt.date(2018,",
"gets dropped self.assert_matches([], [ ['', 1, 1.23, 'hello'] ]) class TestAssertRenderedXlsxMatches: def setup(self):",
"def test_multiple_headers(self): self.set_headers([['Foo', 'Bar']]) self.assert_matches([['Foo', 'Bar']], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([],",
"\"\"\"Verify the `assert_list_equal` method performs as expected\"\"\" def test_simple_equivalents(self): testing.assert_list_equal([], []) testing.assert_list_equal([1, 2,",
"testing.assert_rendered_xls_matches(b'', None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, None, None) with pytest.raises(AssertionError): testing.assert_rendered_xls_matches(None, [], [])",
"3], [3, 4] ] ) def test_value_types(self): self.set_values([ [1, 1.23, 'hello', None, True,",
"('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) def setup_method(self,",
"3]]) def test_multiple_rows(self): self.set_values([ [1, 2, 3], [2, 3, 4] ]) self.assert_matches([], [",
"pytest import xlsxwriter import xlwt from webgrid import testing from webgrid_ta.grids import RadioGrid,",
"self.headers_written: row_offset = self.headers_written for row_index, row in enumerate(values, start=row_offset): for col_index, value",
"[None, 1, 1.23, 'hello'] ]) class TestGridBase(testing.GridBase): grid_cls = TemporalGrid sort_tests = (",
"[2, 3], [3, 4] ]) self.assert_matches( [ ['Foo', 'Bar'], ['Snoopy', 'Dog'], ], [",
"set_headers(self, headers): for index, header in enumerate(headers): self.sheet.write(0, index, header) self.headers_written = True",
"('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'mssql': pytest.skip('sql",
"(x for x in range(3))) class TestAssertRenderedXlsMatches: def setup(self): self.workbook = xlwt.Workbook() self.sheet",
"testing.assert_list_equal([], [1]) with pytest.raises(AssertionError): testing.assert_list_equal([1], []) def test_different_elements(self): with pytest.raises(AssertionError): testing.assert_list_equal([1, 2, 3],",
"[]) def test_multiple_headers(self): self.set_headers(['Foo', 'Bar']) self.assert_matches(['Foo', 'Bar'], []) def test_single_row(self): self.set_values([[1, 2, 3]])",
"None, True, False] ]) self.assert_matches([], [ [1, 1.23, 'hello', None, True, False] ])",
"('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'postgresql': pytest.skip('postgres-only test') @property",
"webgrid_ta.grids import RadioGrid, TemporalGrid from webgrid_ta.model.entities import Person, db def setup_module(): import flask",
"['Foo', 'Bar'], ['Snoopy', 'Dog'], ], [ [1, 2], [2, 3], [3, 4] ]",
"'persons.createdts'), ('due_date', 'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'mssql':",
"@classmethod def setup_class(cls): if db.engine.dialect.name != 'mssql': pytest.skip('sql server-only test') @property def filters(self):",
"pytest.raises(AssertionError): testing.assert_rendered_xlsx_matches(None, [], []) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers([['Foo']]) self.assert_matches([['Foo']], [])",
"testing.assert_list_equal('123', '123') def test_different_lengths(self): with pytest.raises(AssertionError): testing.assert_list_equal([], [1]) with pytest.raises(AssertionError): testing.assert_list_equal([1], []) def",
"[]) def test_blank_workbook(self): self.assert_matches([], []) def test_single_header(self): self.set_headers(['Foo']) self.assert_matches(['Foo'], []) def test_multiple_headers(self): self.set_headers(['Foo',",
"'persons.due_date'), ('start_time', 'persons.start_time'), ) @classmethod def setup_class(cls): if db.engine.dialect.name != 'sqlite': pytest.skip('sqlite-only test')",
"self.set_values([ [1, 2, 3], [2, 3, 4] ]) self.assert_matches([], [ [1, 2, 3],",
"]) self.set_values([ [1, 2], [2, 3], [3, 4] ]) self.assert_matches( [ ['Foo', 'Bar'],",
"30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date', 'eq', dt.date(2018, 1, 1), \"WHERE persons.due_date",
"def setup_class(cls): if db.engine.dialect.name != 'postgresql': pytest.skip('postgres-only test') @property def filters(self): return (",
"[3, 4] ] ) def test_value_types(self): self.set_values([ [1, 1.23, 'hello', None, True, False]",
"False] ]) self.assert_matches([], [ [1, 1.23, 'hello', None, True, False] ]) def test_none_is_mangled(self):",
") def test_value_types(self): self.set_values([ [1, 1.23, 'hello', None, True, False] ]) self.assert_matches([], [",
"self.set_headers(['Foo', 'Bar']) self.set_values([ [1, 2], [2, 3], [3, 4] ]) self.assert_matches( ['Foo', 'Bar'],",
"values): row_offset = 0 if self.headers_written: row_offset = 1 for row_index, row in",
"header) self.headers_written = True def set_values(self, values): row_offset = 0 if self.headers_written: row_offset",
"persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME WITHOUT TIME ZONE)\"), ) class TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls =",
"'eq', '2018-01-01', \"WHERE persons.due_date = '2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN",
"xlwt from webgrid import testing from webgrid_ta.grids import RadioGrid, TemporalGrid from webgrid_ta.model.entities import",
"3, 4] ]) self.assert_matches([], [ [1, 2, 3], [2, 3, 4] ]) def",
"pytest.skip('sqlite-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1, 1, 5,",
"self.set_values([ [None, 1, 1.23, 'hello', None] ]) # the right `None` gets dropped",
"]) # the right `None` gets dropped self.assert_matches([], [ [None, 1, 1.23, 'hello']",
"BETWEEN CAST('01:30:00.000000' AS TIME WITHOUT TIME ZONE)\"), ) class TestGridBaseMSSQLDates(testing.MSSQLGridBase): grid_cls = TemporalGrid",
"col_index, value in enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xls_headers, xls_rows): self.workbook.save(self.stream) testing.assert_rendered_xls_matches(self.stream.getvalue(),",
"Date', 'Start Time'), )) self.expect_table_contents((('01/01/2018 05:30 AM', '05/31/2019', '01:30 AM'), )) class TestGridBasePG(testing.GridBase):",
"enumerate(row): self.sheet.write(row_index, col_index, value) def assert_matches(self, xlsx_headers, xlsx_rows): self.workbook.close() testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows) def",
"None, True, False] ]) self.assert_matches([], [ [1, 1.23, 'hello', '', True, False] ])",
"class TestAssertRenderedXlsMatches: def setup(self): self.workbook = xlwt.Workbook() self.sheet = self.workbook.add_sheet('sheet1') self.stream = BytesIO()",
"test_multiple_headers(self): self.set_headers([['Foo', 'Bar']]) self.assert_matches([['Foo', 'Bar']], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([], [[1,",
"('createdts', 'eq', dt.datetime(2018, 1, 1, 5, 30), \"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'\"), ('due_date',",
"'2018-01-01'\"), ('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'), \"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)\"), ) def",
"pytest.skip('postgres-only test') @property def filters(self): return ( ('createdts', 'eq', dt.datetime(2018, 1, 1, 5,",
"def test_multiple_headers(self): self.set_headers(['Foo', 'Bar']) self.assert_matches(['Foo', 'Bar'], []) def test_single_row(self): self.set_values([[1, 2, 3]]) self.assert_matches([],"
] |
[
"[dir_path + \"/src\", dir_path] macros = [(\"CYTHON_TRACE\", \"1\")] extensions = [Extension(\"quicksect\", [\"src/quicksect.pyx\"], define_macros=macros,",
"\"/src\", dir_path] macros = [(\"CYTHON_TRACE\", \"1\")] extensions = [Extension(\"quicksect\", [\"src/quicksect.pyx\"], define_macros=macros, include_dirs=include_dirs)] setup(version='0.2.2',",
"dir_path] macros = [(\"CYTHON_TRACE\", \"1\")] extensions = [Extension(\"quicksect\", [\"src/quicksect.pyx\"], define_macros=macros, include_dirs=include_dirs)] setup(version='0.2.2', name='quicksect',",
"Extension from setuptools import setup, find_packages import os dir_path = os.path.dirname(os.path.realpath(__file__)) include_dirs =",
"packages=find_packages(), setup_requires=['cython'], install_requires=['cython'], test_suite='nose.collector', license = 'The MIT License', tests_require='nose', package_data={'': ['*.pyx', '*.pxd']},",
"find_packages import os dir_path = os.path.dirname(os.path.realpath(__file__)) include_dirs = [dir_path + \"/src\", dir_path] macros",
"= os.path.dirname(os.path.realpath(__file__)) include_dirs = [dir_path + \"/src\", dir_path] macros = [(\"CYTHON_TRACE\", \"1\")] extensions",
"extensions = [Extension(\"quicksect\", [\"src/quicksect.pyx\"], define_macros=macros, include_dirs=include_dirs)] setup(version='0.2.2', name='quicksect', description=\"fast, simple interval intersection\", ext_modules",
"author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>, <EMAIL>\", packages=find_packages(), setup_requires=['cython'], install_requires=['cython'], test_suite='nose.collector', license = 'The MIT License', tests_require='nose',",
"import cythonize from setuptools.extension import Extension from setuptools import setup, find_packages import os",
"install_requires=['cython'], test_suite='nose.collector', license = 'The MIT License', tests_require='nose', package_data={'': ['*.pyx', '*.pxd']}, include_dirs=[\".\"], )",
"setuptools import setup, find_packages import os dir_path = os.path.dirname(os.path.realpath(__file__)) include_dirs = [dir_path +",
"long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>, <EMAIL>\", packages=find_packages(), setup_requires=['cython'], install_requires=['cython'], test_suite='nose.collector', license = 'The MIT License',",
"import setup, find_packages import os dir_path = os.path.dirname(os.path.realpath(__file__)) include_dirs = [dir_path + \"/src\",",
"\"1\")] extensions = [Extension(\"quicksect\", [\"src/quicksect.pyx\"], define_macros=macros, include_dirs=include_dirs)] setup(version='0.2.2', name='quicksect', description=\"fast, simple interval intersection\",",
"dir_path = os.path.dirname(os.path.realpath(__file__)) include_dirs = [dir_path + \"/src\", dir_path] macros = [(\"CYTHON_TRACE\", \"1\")]",
"cythonize from setuptools.extension import Extension from setuptools import setup, find_packages import os dir_path",
"language_level=3), long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>, <EMAIL>\", packages=find_packages(), setup_requires=['cython'], install_requires=['cython'], test_suite='nose.collector', license = 'The MIT",
"= [Extension(\"quicksect\", [\"src/quicksect.pyx\"], define_macros=macros, include_dirs=include_dirs)] setup(version='0.2.2', name='quicksect', description=\"fast, simple interval intersection\", ext_modules =",
"interval intersection\", ext_modules = cythonize(extensions, language_level=3), long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>, <EMAIL>\", packages=find_packages(), setup_requires=['cython'], install_requires=['cython'],",
"= [dir_path + \"/src\", dir_path] macros = [(\"CYTHON_TRACE\", \"1\")] extensions = [Extension(\"quicksect\", [\"src/quicksect.pyx\"],",
"description=\"fast, simple interval intersection\", ext_modules = cythonize(extensions, language_level=3), long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>, <EMAIL>\", packages=find_packages(),",
"import os dir_path = os.path.dirname(os.path.realpath(__file__)) include_dirs = [dir_path + \"/src\", dir_path] macros =",
"intersection\", ext_modules = cythonize(extensions, language_level=3), long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>, <EMAIL>\", packages=find_packages(), setup_requires=['cython'], install_requires=['cython'], test_suite='nose.collector',",
"os dir_path = os.path.dirname(os.path.realpath(__file__)) include_dirs = [dir_path + \"/src\", dir_path] macros = [(\"CYTHON_TRACE\",",
"from setuptools.extension import Extension from setuptools import setup, find_packages import os dir_path =",
"define_macros=macros, include_dirs=include_dirs)] setup(version='0.2.2', name='quicksect', description=\"fast, simple interval intersection\", ext_modules = cythonize(extensions, language_level=3), long_description=open('README.rst').read(),",
"ext_modules = cythonize(extensions, language_level=3), long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>, <EMAIL>\", packages=find_packages(), setup_requires=['cython'], install_requires=['cython'], test_suite='nose.collector', license",
"import Extension from setuptools import setup, find_packages import os dir_path = os.path.dirname(os.path.realpath(__file__)) include_dirs",
"setup(version='0.2.2', name='quicksect', description=\"fast, simple interval intersection\", ext_modules = cythonize(extensions, language_level=3), long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>,",
"from Cython.Build import cythonize from setuptools.extension import Extension from setuptools import setup, find_packages",
"name='quicksect', description=\"fast, simple interval intersection\", ext_modules = cythonize(extensions, language_level=3), long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>, <EMAIL>\",",
"cythonize(extensions, language_level=3), long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>, <EMAIL>\", packages=find_packages(), setup_requires=['cython'], install_requires=['cython'], test_suite='nose.collector', license = 'The",
"setup_requires=['cython'], install_requires=['cython'], test_suite='nose.collector', license = 'The MIT License', tests_require='nose', package_data={'': ['*.pyx', '*.pxd']}, include_dirs=[\".\"],",
"[Extension(\"quicksect\", [\"src/quicksect.pyx\"], define_macros=macros, include_dirs=include_dirs)] setup(version='0.2.2', name='quicksect', description=\"fast, simple interval intersection\", ext_modules = cythonize(extensions,",
"author_email=\"<EMAIL>, <EMAIL>\", packages=find_packages(), setup_requires=['cython'], install_requires=['cython'], test_suite='nose.collector', license = 'The MIT License', tests_require='nose', package_data={'':",
"[\"src/quicksect.pyx\"], define_macros=macros, include_dirs=include_dirs)] setup(version='0.2.2', name='quicksect', description=\"fast, simple interval intersection\", ext_modules = cythonize(extensions, language_level=3),",
"include_dirs=include_dirs)] setup(version='0.2.2', name='quicksect', description=\"fast, simple interval intersection\", ext_modules = cythonize(extensions, language_level=3), long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\",",
"include_dirs = [dir_path + \"/src\", dir_path] macros = [(\"CYTHON_TRACE\", \"1\")] extensions = [Extension(\"quicksect\",",
"[(\"CYTHON_TRACE\", \"1\")] extensions = [Extension(\"quicksect\", [\"src/quicksect.pyx\"], define_macros=macros, include_dirs=include_dirs)] setup(version='0.2.2', name='quicksect', description=\"fast, simple interval",
"macros = [(\"CYTHON_TRACE\", \"1\")] extensions = [Extension(\"quicksect\", [\"src/quicksect.pyx\"], define_macros=macros, include_dirs=include_dirs)] setup(version='0.2.2', name='quicksect', description=\"fast,",
"Cython.Build import cythonize from setuptools.extension import Extension from setuptools import setup, find_packages import",
"= [(\"CYTHON_TRACE\", \"1\")] extensions = [Extension(\"quicksect\", [\"src/quicksect.pyx\"], define_macros=macros, include_dirs=include_dirs)] setup(version='0.2.2', name='quicksect', description=\"fast, simple",
"from setuptools import setup, find_packages import os dir_path = os.path.dirname(os.path.realpath(__file__)) include_dirs = [dir_path",
"setuptools.extension import Extension from setuptools import setup, find_packages import os dir_path = os.path.dirname(os.path.realpath(__file__))",
"simple interval intersection\", ext_modules = cythonize(extensions, language_level=3), long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>, <EMAIL>\", packages=find_packages(), setup_requires=['cython'],",
"+ \"/src\", dir_path] macros = [(\"CYTHON_TRACE\", \"1\")] extensions = [Extension(\"quicksect\", [\"src/quicksect.pyx\"], define_macros=macros, include_dirs=include_dirs)]",
"= cythonize(extensions, language_level=3), long_description=open('README.rst').read(), author=\"<NAME>,<NAME>\", author_email=\"<EMAIL>, <EMAIL>\", packages=find_packages(), setup_requires=['cython'], install_requires=['cython'], test_suite='nose.collector', license =",
"<EMAIL>\", packages=find_packages(), setup_requires=['cython'], install_requires=['cython'], test_suite='nose.collector', license = 'The MIT License', tests_require='nose', package_data={'': ['*.pyx',",
"setup, find_packages import os dir_path = os.path.dirname(os.path.realpath(__file__)) include_dirs = [dir_path + \"/src\", dir_path]",
"os.path.dirname(os.path.realpath(__file__)) include_dirs = [dir_path + \"/src\", dir_path] macros = [(\"CYTHON_TRACE\", \"1\")] extensions ="
] |
[
"'api_version': '0.1', 'is_test_db': True }, 'notes': [ { 'title': 'some note title', 'text':",
"note title.\"\"\" super(NoteAlreadyExists, self).__init__(title) class NoSuchNoteExists(Exception): \"\"\"Raised if trying to delete a note",
"i, note return False def search_notes(query): \"\"\"Search notes by query.\"\"\" def match_token(note, tokens):",
"/search?q=(.*) .\"\"\" def get(self): \"\"\"Handle get and return all notes matching search query.\"\"\"",
"/version .\"\"\" def get(self): \"\"\"Handle get and return verision and api_version.\"\"\" response =",
"delete note with given title from database.\"\"\" try: delete_note(title) except NoSuchNoteExists: self.clear() self.set_status(404)",
"{ 'notes': db['notes'] } self.write(response) def put(self, *args, **kwargs): \"\"\"Handle put and create",
"note in enumerate(db['notes']): if note[\"title\"] == title: return i, note return False def",
"return False tokens_found.append(token) return len(tokens_found) == len(tokens) notes = [] query_tokens = tokenize(query)",
"note in db['notes']: if match_token(note, query_tokens): notes.append(note) return notes class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler): \"\"\"Set",
"database.\"\"\" found = find_note(title) if not found: self.clear() self.set_status(404) self.finish(\"Note '{}'' not found!\".format(title))",
"Notes App frontend.\"\"\" import tornado.escape import tornado.ioloop import tornado.web import tornado.escape from tornado_cors",
"**kwargs): \"\"\"Handle put and create / update give note.\"\"\" note = json.loads(self.request.body.decode('utf-8')) title_update",
"os.path.abspath(__file__)) logging.info(\"server: db_path:\", db_path) with open(db_path) as f: db = json.load(f) is_closing =",
"note[\"title\"] + \" \" + note[\"text\"] if token not in s.lower(): return False",
"'some note title', 'text': 'some note text' }, { 'title': 'other note title',",
"'note_titles': [note[\"title\"] for note in db['notes']] } self.write(response) class NotesSearchHandler(CorsBaseHandler): \"\"\"Handle /search?q=(.*) .\"\"\"",
"db_path) with open(db_path) as f: db = json.load(f) is_closing = False def signal_handler(signum,",
"is missing, True if any match. \"\"\" tokens_found = [] for token in",
"note[\"title\"] if isinstance(title_update, dict): find_title = title_update[\"old\"] new_title = title_update[\"new\"] else: find_title =",
"logging import json import os import signal import sys PORT = 3456 DB_PATH",
"from database.\"\"\" found = find_note(title) if not found: self.clear() self.set_status(404) self.finish(\"Note '{}'' not",
"note in db['notes']] } self.write(response) class NotesSearchHandler(CorsBaseHandler): \"\"\"Handle /search?q=(.*) .\"\"\" def get(self): \"\"\"Handle",
"False if no such note.\"\"\" for i, note in enumerate(db['notes']): if note[\"title\"] ==",
"logging.info(\"server path:\", os.path.abspath(__file__)) logging.info(\"server: db_path:\", db_path) with open(db_path) as f: db = json.load(f)",
"del db['notes'][found[0]] def update_note(title, note): \"\"\"Update an existing note with a given title,",
"not found!\".format(title)) return response = found[1] self.write(response) def delete(self, title): \"\"\"Handle delete and",
"exist.\"\"\" def __init__(self, title): \"\"\"Show exception with the note title.\"\"\" super(NoSuchNoteExists, self).__init__(title) def",
"logging.info(\"Starting server...\") read_db() signal.signal(signal.SIGINT, signal_handler) application.listen(PORT) tornado.ioloop.PeriodicCallback(try_exit, 500).start() tornado.ioloop.IOLoop.instance().start() logging.info(\"Server stopped.\") if __name__",
"\"\": response = { 'notes': db['notes'] } else: response = { 'notes': search_notes(self.get_argument('q'))",
"'GET, PUT, DELETE' CORS_HEADERS = ( 'Access-Control-Allow-Headers, ' 'Origin, ' 'Accept, ' 'X-Requested-With,",
"all notes from database.\"\"\" response = { 'notes': db['notes'] } self.write(response) def put(self,",
"is_using_test_db(): routes.extend(test_routes) application = tornado.web.Application(routes) def read_db(): \"\"\"'Read in' database for use.\"\"\" global",
"def put(self, *args, **kwargs): \"\"\"Handle put and create / update give note.\"\"\" note",
"'timestamp': note[\"timestamp\"] } found = find_note(find_title) if not found: add_note(_note) self.clear() self.set_status(200) self.finish(\"Note",
"get and return note with given title from database.\"\"\" found = find_note(title) if",
"add a new note with title that is already taken.\"\"\" def __init__(self, title):",
"titles from database.\"\"\" response = { 'note_titles': [note[\"title\"] for note in db['notes']] }",
"'notes': db['notes'] } else: response = { 'notes': search_notes(self.get_argument('q')) } self.write(response) class TestBeginHandler(CorsBaseHandler):",
"for use.\"\"\" global db db_path = DB_PATH if is_using_test_db(): db_path = TEST_DB_PATH logging.info(\"server",
"to notes.\"\"\" if find_note(note[\"title\"]): raise NoteAlreadyExists(note[\"title\"]) db['notes'].append(note) def delete_note(title): \"\"\"Delete note from notes.\"\"\"",
"title', 'text': 'other note text' } ] } def tokenize(s): \"\"\"Split string into",
"\"\"\"Delete note from notes.\"\"\" found = find_note(title) if not found: raise NoSuchNoteExists(title) del",
"in tokens: s = note[\"title\"] + \" \" + note[\"text\"] if token not",
"taken.\"\"\" def __init__(self, title): \"\"\"Show exception with the note title.\"\"\" super(NoteAlreadyExists, self).__init__(title) class",
"with title that is already taken.\"\"\" def __init__(self, title): \"\"\"Show exception with the",
"/test/begin .\"\"\" def get(self): \"\"\"Setup test to have end with expected state afterwards.\"\"\"",
"API for the Notes App frontend.\"\"\" import tornado.escape import tornado.ioloop import tornado.web import",
"self.write(response) class TestBeginHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to have expected",
"implementation still. Return False if any of the tokens is missing, True if",
"/notes/titles .\"\"\" def get(self): \"\"\"Handle get and return all note titles from database.\"\"\"",
"delete and delete note with given title from database.\"\"\" try: delete_note(title) except NoSuchNoteExists:",
"\"\"\"Set up CORS and allow separate origin for the client.\"\"\" CORS_ORIGIN = 'http://localhost:8080'",
"\"\"\"Handle get and return all notes from database.\"\"\" response = { 'notes': db['notes']",
"if isinstance(title_update, dict): find_title = title_update[\"old\"] new_title = title_update[\"new\"] else: find_title = title_update",
"\"\"\"Handle get and return note with given title from database.\"\"\" found = find_note(title)",
"\"\"\"Check if started with use test db flag.\"\"\" return \"--use-test-db\" in sys.argv routes",
"added.\".format(find_title)) else: update_note(find_title, _note) self.clear() self.set_status(204) self.finish(\"Note '{}' updated.\".format(new_title)) class NoteHandler(CorsBaseHandler): \"\"\"Handle /note/(.*)",
"find_note(title) if not found: self.clear() self.set_status(404) self.finish(\"Note '{}'' not found!\".format(title)) return response =",
"in db['notes']: if match_token(note, query_tokens): notes.append(note) return notes class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler): \"\"\"Set up",
"notes from database.\"\"\" response = { 'notes': db['notes'] } self.write(response) def put(self, *args,",
"new_title = title_update[\"new\"] else: find_title = title_update new_title = title_update _note = {",
"try: delete_note(title) except NoSuchNoteExists: self.clear() self.set_status(404) self.finish(\"Note '{}' does not even exist.\".format(title)) class",
"= tornado.web.Application(routes) def read_db(): \"\"\"'Read in' database for use.\"\"\" global db db_path =",
"'http://localhost:8080' CORS_METHODS = 'GET, PUT, DELETE' CORS_HEADERS = ( 'Access-Control-Allow-Headers, ' 'Origin, '",
"given title, possibly retitling it.\"\"\" found = find_note(title) if not found: raise NoSuchNoteExists(title)",
"def try_exit(): \"\"\"Try closing tornado.\"\"\" global is_closing if is_closing: # clean up here",
"title.\"\"\" super(NoSuchNoteExists, self).__init__(title) def add_note(note): \"\"\"Add note to notes.\"\"\" if find_note(note[\"title\"]): raise NoteAlreadyExists(note[\"title\"])",
"f: db = json.load(f) is_closing = False def signal_handler(signum, frame): \"\"\"Signal handler for",
"title): \"\"\"Show exception with the note title.\"\"\" super(NoSuchNoteExists, self).__init__(title) def add_note(note): \"\"\"Add note",
"import tornado.escape import tornado.ioloop import tornado.web import tornado.escape from tornado_cors import CorsMixin import",
"[p.lower() for p in s.split(\" \") if p] class NoteAlreadyExists(Exception): \"\"\"Raised if trying",
"if any of the tokens is missing, True if any match. \"\"\" tokens_found",
"note[\"title\"] == title: return i, note return False def search_notes(query): \"\"\"Search notes by",
". /note/:title GET DELETE \"\"\" def get(self, title): \"\"\"Handle get and return note",
"allow separate origin for the client.\"\"\" CORS_ORIGIN = 'http://localhost:8080' CORS_METHODS = 'GET, PUT,",
"db = json.load(f) is_closing = False def signal_handler(signum, frame): \"\"\"Signal handler for closing",
"tornado.web import tornado.escape from tornado_cors import CorsMixin import logging import json import os",
"tornado.ioloop.IOLoop.instance().stop() logging.info('exit success') def start(): \"\"\"Start tornado.\"\"\" logging.info(\"Starting server...\") read_db() signal.signal(signal.SIGINT, signal_handler) application.listen(PORT)",
"import tornado.ioloop import tornado.web import tornado.escape from tornado_cors import CorsMixin import logging import",
"title_update = note[\"title\"] if isinstance(title_update, dict): find_title = title_update[\"old\"] new_title = title_update[\"new\"] else:",
"missing, True if any match. \"\"\" tokens_found = [] for token in tokens:",
"database for use.\"\"\" global db db_path = DB_PATH if is_using_test_db(): db_path = TEST_DB_PATH",
"= find_note(title) if not found: raise NoSuchNoteExists(title) note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"] db['notes'][found[0]] = note",
"return response = found[1] self.write(response) def delete(self, title): \"\"\"Handle delete and delete note",
"note title', 'text': 'some note text' }, { 'title': 'other note title', 'text':",
"self.set_status(404) self.finish(\"Note '{}'' not found!\".format(title)) return response = found[1] self.write(response) def delete(self, title):",
"tornado.escape from tornado_cors import CorsMixin import logging import json import os import signal",
"if not found: add_note(_note) self.clear() self.set_status(200) self.finish(\"Note '{}' added.\".format(find_title)) else: update_note(find_title, _note) self.clear()",
"is_using_test_db(): db_path = TEST_DB_PATH logging.info(\"server path:\", os.path.abspath(__file__)) logging.info(\"server: db_path:\", db_path) with open(db_path) as",
"if not found: raise NoSuchNoteExists(title) del db['notes'][found[0]] def update_note(title, note): \"\"\"Update an existing",
"'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True } self.write(response) class NotesRootHandler(CorsBaseHandler): \"\"\"Handle /notes .\"\"\"",
"use test db flag.\"\"\" return \"--use-test-db\" in sys.argv routes = [ (r\"/version\", VersionRootHandler),",
"TestEndHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to have end with expected",
"DB_PATH if is_using_test_db(): db_path = TEST_DB_PATH logging.info(\"server path:\", os.path.abspath(__file__)) logging.info(\"server: db_path:\", db_path) with",
"title that is already taken.\"\"\" def __init__(self, title): \"\"\"Show exception with the note",
"NotesTitlesHandler), (r\"/note/(.*)\", NoteHandler), (r\"/search\", NotesSearchHandler), ] test_routes = [ (r\"/test/begin\", TestBeginHandler), (r\"/test/end\", TestEndHandler)",
"to add a new note with title that is already taken.\"\"\" def __init__(self,",
"with a given title, possibly retitling it.\"\"\" found = find_note(title) if not found:",
"'Origin, ' 'Accept, ' 'X-Requested-With, ' 'Content-Type, ' 'Access-Control-Request-Method, ' 'Access-Control-Request-Headers' ) class",
"True } self.write(response) class NotesRootHandler(CorsBaseHandler): \"\"\"Handle /notes .\"\"\" def get(self): \"\"\"Handle get and",
"False def search_notes(query): \"\"\"Search notes by query.\"\"\" def match_token(note, tokens): \"\"\"Test if note",
"get(self): \"\"\"Handle get and return verision and api_version.\"\"\" response = { 'version': '0.0.1',",
"even exist.\".format(title)) class NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle /notes/titles .\"\"\" def get(self): \"\"\"Handle get and return",
"with given title from database.\"\"\" try: delete_note(title) except NoSuchNoteExists: self.clear() self.set_status(404) self.finish(\"Note '{}'",
"self).__init__(title) def add_note(note): \"\"\"Add note to notes.\"\"\" if find_note(note[\"title\"]): raise NoteAlreadyExists(note[\"title\"]) db['notes'].append(note) def",
"= { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True } self.write(response) class NotesRootHandler(CorsBaseHandler): \"\"\"Handle",
"return notes class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler): \"\"\"Set up CORS and allow separate origin for",
"if is_using_test_db(): routes.extend(test_routes) application = tornado.web.Application(routes) def read_db(): \"\"\"'Read in' database for use.\"\"\"",
"an existing note with a given title, possibly retitling it.\"\"\" found = find_note(title)",
"DELETE \"\"\" def get(self, title): \"\"\"Handle get and return note with given title",
"self.write(response) class NotesSearchHandler(CorsBaseHandler): \"\"\"Handle /search?q=(.*) .\"\"\" def get(self): \"\"\"Handle get and return all",
"import os import signal import sys PORT = 3456 DB_PATH = \"db.json\" TEST_DB_PATH",
"of note that has title or False if no such note.\"\"\" for i,",
"exception with the note title.\"\"\" super(NoSuchNoteExists, self).__init__(title) def add_note(note): \"\"\"Add note to notes.\"\"\"",
"tokens is missing, True if any match. \"\"\" tokens_found = [] for token",
"} self.write(response) def put(self, *args, **kwargs): \"\"\"Handle put and create / update give",
"put(self, *args, **kwargs): \"\"\"Handle put and create / update give note.\"\"\" note =",
"have expected state.\"\"\" read_db() class TestEndHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test",
"if started with use test db flag.\"\"\" return \"--use-test-db\" in sys.argv routes =",
"'some note text' }, { 'title': 'other note title', 'text': 'other note text'",
"such note.\"\"\" for i, note in enumerate(db['notes']): if note[\"title\"] == title: return i,",
"] test_routes = [ (r\"/test/begin\", TestBeginHandler), (r\"/test/end\", TestEndHandler) ] if is_using_test_db(): routes.extend(test_routes) application",
"title_update new_title = title_update _note = { 'title': new_title, 'text': note[\"text\"], 'timestamp': note[\"timestamp\"]",
"# clean up here tornado.ioloop.IOLoop.instance().stop() logging.info('exit success') def start(): \"\"\"Start tornado.\"\"\" logging.info(\"Starting server...\")",
"} self.write(response) class NotesRootHandler(CorsBaseHandler): \"\"\"Handle /notes .\"\"\" def get(self): \"\"\"Handle get and return",
"notes = [] query_tokens = tokenize(query) for note in db['notes']: if match_token(note, query_tokens):",
"afterwards.\"\"\" read_db() def is_using_test_db(): \"\"\"Check if started with use test db flag.\"\"\" return",
"= DB_PATH if is_using_test_db(): db_path = TEST_DB_PATH logging.info(\"server path:\", os.path.abspath(__file__)) logging.info(\"server: db_path:\", db_path)",
"class NoteAlreadyExists(Exception): \"\"\"Raised if trying to add a new note with title that",
"= { 'note_titles': [note[\"title\"] for note in db['notes']] } self.write(response) class NotesSearchHandler(CorsBaseHandler): \"\"\"Handle",
"note[\"text\"], 'timestamp': note[\"timestamp\"] } found = find_note(find_title) if not found: add_note(_note) self.clear() self.set_status(200)",
"is_closing: # clean up here tornado.ioloop.IOLoop.instance().stop() logging.info('exit success') def start(): \"\"\"Start tornado.\"\"\" logging.info(\"Starting",
"/test/begin .\"\"\" def get(self): \"\"\"Setup test to have expected state.\"\"\" read_db() class TestEndHandler(CorsBaseHandler):",
"up here tornado.ioloop.IOLoop.instance().stop() logging.info('exit success') def start(): \"\"\"Start tornado.\"\"\" logging.info(\"Starting server...\") read_db() signal.signal(signal.SIGINT,",
"of the tokens is missing, True if any match. \"\"\" tokens_found = []",
"= tokenize(query) for note in db['notes']: if match_token(note, query_tokens): notes.append(note) return notes class",
"/note/:title GET DELETE \"\"\" def get(self, title): \"\"\"Handle get and return note with",
"simple server with a REST API for the Notes App frontend.\"\"\" import tornado.escape",
"still. Return False if any of the tokens is missing, True if any",
"is_closing if is_closing: # clean up here tornado.ioloop.IOLoop.instance().stop() logging.info('exit success') def start(): \"\"\"Start",
"note contains any of the tokens. A very simple implementation still. Return False",
"tokens.\"\"\" return [p.lower() for p in s.split(\" \") if p] class NoteAlreadyExists(Exception): \"\"\"Raised",
"trying to delete a note that doesn't exist.\"\"\" def __init__(self, title): \"\"\"Show exception",
"class TestBeginHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to have expected state.\"\"\"",
"if p] class NoteAlreadyExists(Exception): \"\"\"Raised if trying to add a new note with",
"test to have expected state.\"\"\" read_db() class TestEndHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self):",
"any of the tokens is missing, True if any match. \"\"\" tokens_found =",
"False tokens_found.append(token) return len(tokens_found) == len(tokens) notes = [] query_tokens = tokenize(query) for",
"query.\"\"\" response = { 'notes': [] } if self.get_argument('q') == \"\": response =",
"found[1][\"timestamp\"][\"created\"] db['notes'][found[0]] = note def find_note(title): \"\"\"Return (index, note) of note that has",
"updated.\".format(new_title)) class NoteHandler(CorsBaseHandler): \"\"\"Handle /note/(.*) . /note/:title GET DELETE \"\"\" def get(self, title):",
"= { 'notes': [] } if self.get_argument('q') == \"\": response = { 'notes':",
"\"\"\"Setup test to have expected state.\"\"\" read_db() class TestEndHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def",
"= [ (r\"/version\", VersionRootHandler), (r\"/notes\", NotesRootHandler), (r\"/notes/titles\", NotesTitlesHandler), (r\"/note/(.*)\", NoteHandler), (r\"/search\", NotesSearchHandler), ]",
"for note in db['notes']] } self.write(response) class NotesSearchHandler(CorsBaseHandler): \"\"\"Handle /search?q=(.*) .\"\"\" def get(self):",
"find_note(find_title) if not found: add_note(_note) self.clear() self.set_status(200) self.finish(\"Note '{}' added.\".format(find_title)) else: update_note(find_title, _note)",
"'Access-Control-Allow-Headers, ' 'Origin, ' 'Accept, ' 'X-Requested-With, ' 'Content-Type, ' 'Access-Control-Request-Method, ' 'Access-Control-Request-Headers'",
"note[\"timestamp\"] } found = find_note(find_title) if not found: add_note(_note) self.clear() self.set_status(200) self.finish(\"Note '{}'",
"[ (r\"/version\", VersionRootHandler), (r\"/notes\", NotesRootHandler), (r\"/notes/titles\", NotesTitlesHandler), (r\"/note/(.*)\", NoteHandler), (r\"/search\", NotesSearchHandler), ] test_routes",
"existing note with a given title, possibly retitling it.\"\"\" found = find_note(title) if",
"found = find_note(find_title) if not found: add_note(_note) self.clear() self.set_status(200) self.finish(\"Note '{}' added.\".format(find_title)) else:",
"\"\"\"Try closing tornado.\"\"\" global is_closing if is_closing: # clean up here tornado.ioloop.IOLoop.instance().stop() logging.info('exit",
"note text' }, { 'title': 'other note title', 'text': 'other note text' }",
"[ { 'title': 'some note title', 'text': 'some note text' }, { 'title':",
"if not found: self.clear() self.set_status(404) self.finish(\"Note '{}'' not found!\".format(title)) return response = found[1]",
"} def tokenize(s): \"\"\"Split string into tokens.\"\"\" return [p.lower() for p in s.split(\"",
"note that has title or False if no such note.\"\"\" for i, note",
"get(self, title): \"\"\"Handle get and return note with given title from database.\"\"\" found",
"global is_closing logging.info('exiting...') is_closing = True def try_exit(): \"\"\"Try closing tornado.\"\"\" global is_closing",
"= { 'notes': db['notes'] } else: response = { 'notes': search_notes(self.get_argument('q')) } self.write(response)",
"def delete(self, title): \"\"\"Handle delete and delete note with given title from database.\"\"\"",
"signal_handler(signum, frame): \"\"\"Signal handler for closing tornado.\"\"\" global is_closing logging.info('exiting...') is_closing = True",
"token not in s.lower(): return False tokens_found.append(token) return len(tokens_found) == len(tokens) notes =",
"def add_note(note): \"\"\"Add note to notes.\"\"\" if find_note(note[\"title\"]): raise NoteAlreadyExists(note[\"title\"]) db['notes'].append(note) def delete_note(title):",
"PORT = 3456 DB_PATH = \"db.json\" TEST_DB_PATH = \"test/test_db.json\" db = { 'version':",
"server...\") read_db() signal.signal(signal.SIGINT, signal_handler) application.listen(PORT) tornado.ioloop.PeriodicCallback(try_exit, 500).start() tornado.ioloop.IOLoop.instance().start() logging.info(\"Server stopped.\") if __name__ ==",
"tornado.ioloop import tornado.web import tornado.escape from tornado_cors import CorsMixin import logging import json",
"= \"db.json\" TEST_DB_PATH = \"test/test_db.json\" db = { 'version': { 'version': '0.0.1', 'api_version':",
"\"\"\"Show exception with the note title.\"\"\" super(NoteAlreadyExists, self).__init__(title) class NoSuchNoteExists(Exception): \"\"\"Raised if trying",
"the tokens is missing, True if any match. \"\"\" tokens_found = [] for",
"read_db() signal.signal(signal.SIGINT, signal_handler) application.listen(PORT) tornado.ioloop.PeriodicCallback(try_exit, 500).start() tornado.ioloop.IOLoop.instance().start() logging.info(\"Server stopped.\") if __name__ == \"__main__\":",
"in enumerate(db['notes']): if note[\"title\"] == title: return i, note return False def search_notes(query):",
"from database.\"\"\" response = { 'note_titles': [note[\"title\"] for note in db['notes']] } self.write(response)",
"for p in s.split(\" \") if p] class NoteAlreadyExists(Exception): \"\"\"Raised if trying to",
"server with a REST API for the Notes App frontend.\"\"\" import tornado.escape import",
"return all notes from database.\"\"\" response = { 'notes': db['notes'] } self.write(response) def",
"signal import sys PORT = 3456 DB_PATH = \"db.json\" TEST_DB_PATH = \"test/test_db.json\" db",
"test_routes = [ (r\"/test/begin\", TestBeginHandler), (r\"/test/end\", TestEndHandler) ] if is_using_test_db(): routes.extend(test_routes) application =",
"for closing tornado.\"\"\" global is_closing logging.info('exiting...') is_closing = True def try_exit(): \"\"\"Try closing",
"and return note with given title from database.\"\"\" found = find_note(title) if not",
"found: raise NoSuchNoteExists(title) del db['notes'][found[0]] def update_note(title, note): \"\"\"Update an existing note with",
"and return all notes matching search query.\"\"\" response = { 'notes': [] }",
"TestBeginHandler), (r\"/test/end\", TestEndHandler) ] if is_using_test_db(): routes.extend(test_routes) application = tornado.web.Application(routes) def read_db(): \"\"\"'Read",
"with open(db_path) as f: db = json.load(f) is_closing = False def signal_handler(signum, frame):",
"(r\"/test/end\", TestEndHandler) ] if is_using_test_db(): routes.extend(test_routes) application = tornado.web.Application(routes) def read_db(): \"\"\"'Read in'",
"if find_note(note[\"title\"]): raise NoteAlreadyExists(note[\"title\"]) db['notes'].append(note) def delete_note(title): \"\"\"Delete note from notes.\"\"\" found =",
"and api_version.\"\"\" response = { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True } self.write(response)",
"' 'X-Requested-With, ' 'Content-Type, ' 'Access-Control-Request-Method, ' 'Access-Control-Request-Headers' ) class VersionRootHandler(CorsBaseHandler): \"\"\"Handle /version",
"= title_update new_title = title_update _note = { 'title': new_title, 'text': note[\"text\"], 'timestamp':",
"delete a note that doesn't exist.\"\"\" def __init__(self, title): \"\"\"Show exception with the",
"= found[1][\"timestamp\"][\"created\"] db['notes'][found[0]] = note def find_note(title): \"\"\"Return (index, note) of note that",
"] if is_using_test_db(): routes.extend(test_routes) application = tornado.web.Application(routes) def read_db(): \"\"\"'Read in' database for",
"db['notes'][found[0]] = note def find_note(title): \"\"\"Return (index, note) of note that has title",
"{ 'note_titles': [note[\"title\"] for note in db['notes']] } self.write(response) class NotesSearchHandler(CorsBaseHandler): \"\"\"Handle /search?q=(.*)",
"'notes': [] } if self.get_argument('q') == \"\": response = { 'notes': db['notes'] }",
"and return all note titles from database.\"\"\" response = { 'note_titles': [note[\"title\"] for",
"os import signal import sys PORT = 3456 DB_PATH = \"db.json\" TEST_DB_PATH =",
"\"\"\"Handle delete and delete note with given title from database.\"\"\" try: delete_note(title) except",
"for note in db['notes']: if match_token(note, query_tokens): notes.append(note) return notes class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler):",
"[note[\"title\"] for note in db['notes']] } self.write(response) class NotesSearchHandler(CorsBaseHandler): \"\"\"Handle /search?q=(.*) .\"\"\" def",
"into tokens.\"\"\" return [p.lower() for p in s.split(\" \") if p] class NoteAlreadyExists(Exception):",
"len(tokens_found) == len(tokens) notes = [] query_tokens = tokenize(query) for note in db['notes']:",
"token in tokens: s = note[\"title\"] + \" \" + note[\"text\"] if token",
"'{}' added.\".format(find_title)) else: update_note(find_title, _note) self.clear() self.set_status(204) self.finish(\"Note '{}' updated.\".format(new_title)) class NoteHandler(CorsBaseHandler): \"\"\"Handle",
"possibly retitling it.\"\"\" found = find_note(title) if not found: raise NoSuchNoteExists(title) note[\"timestamp\"][\"created\"] =",
"note = json.loads(self.request.body.decode('utf-8')) title_update = note[\"title\"] if isinstance(title_update, dict): find_title = title_update[\"old\"] new_title",
"tornado.\"\"\" logging.info(\"Starting server...\") read_db() signal.signal(signal.SIGINT, signal_handler) application.listen(PORT) tornado.ioloop.PeriodicCallback(try_exit, 500).start() tornado.ioloop.IOLoop.instance().start() logging.info(\"Server stopped.\") if",
"delete_note(title): \"\"\"Delete note from notes.\"\"\" found = find_note(title) if not found: raise NoSuchNoteExists(title)",
"response = { 'notes': db['notes'] } self.write(response) def put(self, *args, **kwargs): \"\"\"Handle put",
"class NotesRootHandler(CorsBaseHandler): \"\"\"Handle /notes .\"\"\" def get(self): \"\"\"Handle get and return all notes",
"clean up here tornado.ioloop.IOLoop.instance().stop() logging.info('exit success') def start(): \"\"\"Start tornado.\"\"\" logging.info(\"Starting server...\") read_db()",
"= find_note(find_title) if not found: add_note(_note) self.clear() self.set_status(200) self.finish(\"Note '{}' added.\".format(find_title)) else: update_note(find_title,",
"add_note(_note) self.clear() self.set_status(200) self.finish(\"Note '{}' added.\".format(find_title)) else: update_note(find_title, _note) self.clear() self.set_status(204) self.finish(\"Note '{}'",
"CORS_HEADERS = ( 'Access-Control-Allow-Headers, ' 'Origin, ' 'Accept, ' 'X-Requested-With, ' 'Content-Type, '",
"if is_using_test_db(): db_path = TEST_DB_PATH logging.info(\"server path:\", os.path.abspath(__file__)) logging.info(\"server: db_path:\", db_path) with open(db_path)",
"title: return i, note return False def search_notes(query): \"\"\"Search notes by query.\"\"\" def",
"'Content-Type, ' 'Access-Control-Request-Method, ' 'Access-Control-Request-Headers' ) class VersionRootHandler(CorsBaseHandler): \"\"\"Handle /version .\"\"\" def get(self):",
"'0.1', 'is_test_db': True }, 'notes': [ { 'title': 'some note title', 'text': 'some",
"not in s.lower(): return False tokens_found.append(token) return len(tokens_found) == len(tokens) notes = []",
"(r\"/notes\", NotesRootHandler), (r\"/notes/titles\", NotesTitlesHandler), (r\"/note/(.*)\", NoteHandler), (r\"/search\", NotesSearchHandler), ] test_routes = [ (r\"/test/begin\",",
"(r\"/note/(.*)\", NoteHandler), (r\"/search\", NotesSearchHandler), ] test_routes = [ (r\"/test/begin\", TestBeginHandler), (r\"/test/end\", TestEndHandler) ]",
"\"\"\"Handle /notes/titles .\"\"\" def get(self): \"\"\"Handle get and return all note titles from",
"i, note in enumerate(db['notes']): if note[\"title\"] == title: return i, note return False",
"self.clear() self.set_status(200) self.finish(\"Note '{}' added.\".format(find_title)) else: update_note(find_title, _note) self.clear() self.set_status(204) self.finish(\"Note '{}' updated.\".format(new_title))",
"already taken.\"\"\" def __init__(self, title): \"\"\"Show exception with the note title.\"\"\" super(NoteAlreadyExists, self).__init__(title)",
"] } def tokenize(s): \"\"\"Split string into tokens.\"\"\" return [p.lower() for p in",
"if not found: raise NoSuchNoteExists(title) note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"] db['notes'][found[0]] = note def find_note(title):",
"match_token(note, query_tokens): notes.append(note) return notes class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler): \"\"\"Set up CORS and allow",
"return len(tokens_found) == len(tokens) notes = [] query_tokens = tokenize(query) for note in",
"\"\"\"Handle /notes .\"\"\" def get(self): \"\"\"Handle get and return all notes from database.\"\"\"",
"note that doesn't exist.\"\"\" def __init__(self, title): \"\"\"Show exception with the note title.\"\"\"",
"found: add_note(_note) self.clear() self.set_status(200) self.finish(\"Note '{}' added.\".format(find_title)) else: update_note(find_title, _note) self.clear() self.set_status(204) self.finish(\"Note",
"_note) self.clear() self.set_status(204) self.finish(\"Note '{}' updated.\".format(new_title)) class NoteHandler(CorsBaseHandler): \"\"\"Handle /note/(.*) . /note/:title GET",
"(r\"/search\", NotesSearchHandler), ] test_routes = [ (r\"/test/begin\", TestBeginHandler), (r\"/test/end\", TestEndHandler) ] if is_using_test_db():",
"match_token(note, tokens): \"\"\"Test if note contains any of the tokens. A very simple",
"import tornado.escape from tornado_cors import CorsMixin import logging import json import os import",
"note titles from database.\"\"\" response = { 'note_titles': [note[\"title\"] for note in db['notes']]",
"note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"] db['notes'][found[0]] = note def find_note(title): \"\"\"Return (index, note) of note",
"DB_PATH = \"db.json\" TEST_DB_PATH = \"test/test_db.json\" db = { 'version': { 'version': '0.0.1',",
"new_title = title_update _note = { 'title': new_title, 'text': note[\"text\"], 'timestamp': note[\"timestamp\"] }",
"response = found[1] self.write(response) def delete(self, title): \"\"\"Handle delete and delete note with",
"NoteAlreadyExists(Exception): \"\"\"Raised if trying to add a new note with title that is",
"that is already taken.\"\"\" def __init__(self, title): \"\"\"Show exception with the note title.\"\"\"",
"_note = { 'title': new_title, 'text': note[\"text\"], 'timestamp': note[\"timestamp\"] } found = find_note(find_title)",
"else: response = { 'notes': search_notes(self.get_argument('q')) } self.write(response) class TestBeginHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\"",
"NoteAlreadyExists(note[\"title\"]) db['notes'].append(note) def delete_note(title): \"\"\"Delete note from notes.\"\"\" found = find_note(title) if not",
"all note titles from database.\"\"\" response = { 'note_titles': [note[\"title\"] for note in",
"query_tokens): notes.append(note) return notes class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler): \"\"\"Set up CORS and allow separate",
"with given title from database.\"\"\" found = find_note(title) if not found: self.clear() self.set_status(404)",
"'title': new_title, 'text': note[\"text\"], 'timestamp': note[\"timestamp\"] } found = find_note(find_title) if not found:",
"self.write(response) def put(self, *args, **kwargs): \"\"\"Handle put and create / update give note.\"\"\"",
"text' } ] } def tokenize(s): \"\"\"Split string into tokens.\"\"\" return [p.lower() for",
"notes matching search query.\"\"\" response = { 'notes': [] } if self.get_argument('q') ==",
"get(self): \"\"\"Handle get and return all note titles from database.\"\"\" response = {",
"'notes': db['notes'] } self.write(response) def put(self, *args, **kwargs): \"\"\"Handle put and create /",
"= \"test/test_db.json\" db = { 'version': { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True",
"state.\"\"\" read_db() class TestEndHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to have",
"CORS_ORIGIN = 'http://localhost:8080' CORS_METHODS = 'GET, PUT, DELETE' CORS_HEADERS = ( 'Access-Control-Allow-Headers, '",
"def get(self): \"\"\"Handle get and return all note titles from database.\"\"\" response =",
"'notes': search_notes(self.get_argument('q')) } self.write(response) class TestBeginHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test",
"import logging import json import os import signal import sys PORT = 3456",
"NoSuchNoteExists(title) del db['notes'][found[0]] def update_note(title, note): \"\"\"Update an existing note with a given",
"frontend.\"\"\" import tornado.escape import tornado.ioloop import tornado.web import tornado.escape from tornado_cors import CorsMixin",
"not found: self.clear() self.set_status(404) self.finish(\"Note '{}'' not found!\".format(title)) return response = found[1] self.write(response)",
"with the note title.\"\"\" super(NoSuchNoteExists, self).__init__(title) def add_note(note): \"\"\"Add note to notes.\"\"\" if",
"CorsBaseHandler(CorsMixin, tornado.web.RequestHandler): \"\"\"Set up CORS and allow separate origin for the client.\"\"\" CORS_ORIGIN",
"success') def start(): \"\"\"Start tornado.\"\"\" logging.info(\"Starting server...\") read_db() signal.signal(signal.SIGINT, signal_handler) application.listen(PORT) tornado.ioloop.PeriodicCallback(try_exit, 500).start()",
"origin for the client.\"\"\" CORS_ORIGIN = 'http://localhost:8080' CORS_METHODS = 'GET, PUT, DELETE' CORS_HEADERS",
"is_closing = False def signal_handler(signum, frame): \"\"\"Signal handler for closing tornado.\"\"\" global is_closing",
"' 'Access-Control-Request-Method, ' 'Access-Control-Request-Headers' ) class VersionRootHandler(CorsBaseHandler): \"\"\"Handle /version .\"\"\" def get(self): \"\"\"Handle",
"return all note titles from database.\"\"\" response = { 'note_titles': [note[\"title\"] for note",
"title.\"\"\" super(NoteAlreadyExists, self).__init__(title) class NoSuchNoteExists(Exception): \"\"\"Raised if trying to delete a note that",
"else: update_note(find_title, _note) self.clear() self.set_status(204) self.finish(\"Note '{}' updated.\".format(new_title)) class NoteHandler(CorsBaseHandler): \"\"\"Handle /note/(.*) .",
"with a REST API for the Notes App frontend.\"\"\" import tornado.escape import tornado.ioloop",
"that has title or False if no such note.\"\"\" for i, note in",
"find_title = title_update[\"old\"] new_title = title_update[\"new\"] else: find_title = title_update new_title = title_update",
"'text': note[\"text\"], 'timestamp': note[\"timestamp\"] } found = find_note(find_title) if not found: add_note(_note) self.clear()",
"'{}' updated.\".format(new_title)) class NoteHandler(CorsBaseHandler): \"\"\"Handle /note/(.*) . /note/:title GET DELETE \"\"\" def get(self,",
"return verision and api_version.\"\"\" response = { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True",
"= { 'title': new_title, 'text': note[\"text\"], 'timestamp': note[\"timestamp\"] } found = find_note(find_title) if",
") class VersionRootHandler(CorsBaseHandler): \"\"\"Handle /version .\"\"\" def get(self): \"\"\"Handle get and return verision",
"== title: return i, note return False def search_notes(query): \"\"\"Search notes by query.\"\"\"",
"to delete a note that doesn't exist.\"\"\" def __init__(self, title): \"\"\"Show exception with",
"if trying to add a new note with title that is already taken.\"\"\"",
"\") if p] class NoteAlreadyExists(Exception): \"\"\"Raised if trying to add a new note",
"note) of note that has title or False if no such note.\"\"\" for",
"put and create / update give note.\"\"\" note = json.loads(self.request.body.decode('utf-8')) title_update = note[\"title\"]",
"import sys PORT = 3456 DB_PATH = \"db.json\" TEST_DB_PATH = \"test/test_db.json\" db =",
"the tokens. A very simple implementation still. Return False if any of the",
"def __init__(self, title): \"\"\"Show exception with the note title.\"\"\" super(NoSuchNoteExists, self).__init__(title) def add_note(note):",
"up CORS and allow separate origin for the client.\"\"\" CORS_ORIGIN = 'http://localhost:8080' CORS_METHODS",
"'other note text' } ] } def tokenize(s): \"\"\"Split string into tokens.\"\"\" return",
"def start(): \"\"\"Start tornado.\"\"\" logging.info(\"Starting server...\") read_db() signal.signal(signal.SIGINT, signal_handler) application.listen(PORT) tornado.ioloop.PeriodicCallback(try_exit, 500).start() tornado.ioloop.IOLoop.instance().start()",
"= find_note(title) if not found: self.clear() self.set_status(404) self.finish(\"Note '{}'' not found!\".format(title)) return response",
"{ 'notes': [] } if self.get_argument('q') == \"\": response = { 'notes': db['notes']",
"and return all notes from database.\"\"\" response = { 'notes': db['notes'] } self.write(response)",
"add_note(note): \"\"\"Add note to notes.\"\"\" if find_note(note[\"title\"]): raise NoteAlreadyExists(note[\"title\"]) db['notes'].append(note) def delete_note(title): \"\"\"Delete",
"def tokenize(s): \"\"\"Split string into tokens.\"\"\" return [p.lower() for p in s.split(\" \")",
"( 'Access-Control-Allow-Headers, ' 'Origin, ' 'Accept, ' 'X-Requested-With, ' 'Content-Type, ' 'Access-Control-Request-Method, '",
"with use test db flag.\"\"\" return \"--use-test-db\" in sys.argv routes = [ (r\"/version\",",
"any match. \"\"\" tokens_found = [] for token in tokens: s = note[\"title\"]",
"notes.append(note) return notes class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler): \"\"\"Set up CORS and allow separate origin",
"title): \"\"\"Handle get and return note with given title from database.\"\"\" found =",
"expected state.\"\"\" read_db() class TestEndHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to",
".\"\"\" def get(self): \"\"\"Handle get and return verision and api_version.\"\"\" response = {",
"'0.1', 'is_test_db': True } self.write(response) class NotesRootHandler(CorsBaseHandler): \"\"\"Handle /notes .\"\"\" def get(self): \"\"\"Handle",
"def get(self): \"\"\"Handle get and return all notes from database.\"\"\" response = {",
"if no such note.\"\"\" for i, note in enumerate(db['notes']): if note[\"title\"] == title:",
"'Access-Control-Request-Headers' ) class VersionRootHandler(CorsBaseHandler): \"\"\"Handle /version .\"\"\" def get(self): \"\"\"Handle get and return",
"= { 'version': { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True }, 'notes': [",
"'{}' does not even exist.\".format(title)) class NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle /notes/titles .\"\"\" def get(self): \"\"\"Handle",
"def get(self): \"\"\"Setup test to have expected state.\"\"\" read_db() class TestEndHandler(CorsBaseHandler): \"\"\"Handle /test/begin",
"title from database.\"\"\" try: delete_note(title) except NoSuchNoteExists: self.clear() self.set_status(404) self.finish(\"Note '{}' does not",
"response = { 'notes': [] } if self.get_argument('q') == \"\": response = {",
"db db_path = DB_PATH if is_using_test_db(): db_path = TEST_DB_PATH logging.info(\"server path:\", os.path.abspath(__file__)) logging.info(\"server:",
"= find_note(title) if not found: raise NoSuchNoteExists(title) del db['notes'][found[0]] def update_note(title, note): \"\"\"Update",
"= title_update _note = { 'title': new_title, 'text': note[\"text\"], 'timestamp': note[\"timestamp\"] } found",
"def get(self, title): \"\"\"Handle get and return note with given title from database.\"\"\"",
"find_title = title_update new_title = title_update _note = { 'title': new_title, 'text': note[\"text\"],",
"\" + note[\"text\"] if token not in s.lower(): return False tokens_found.append(token) return len(tokens_found)",
"from database.\"\"\" try: delete_note(title) except NoSuchNoteExists: self.clear() self.set_status(404) self.finish(\"Note '{}' does not even",
"note title.\"\"\" super(NoSuchNoteExists, self).__init__(title) def add_note(note): \"\"\"Add note to notes.\"\"\" if find_note(note[\"title\"]): raise",
"def search_notes(query): \"\"\"Search notes by query.\"\"\" def match_token(note, tokens): \"\"\"Test if note contains",
"if any match. \"\"\" tokens_found = [] for token in tokens: s =",
"search query.\"\"\" response = { 'notes': [] } if self.get_argument('q') == \"\": response",
"verision and api_version.\"\"\" response = { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True }",
"'0.0.1', 'api_version': '0.1', 'is_test_db': True }, 'notes': [ { 'title': 'some note title',",
"text' }, { 'title': 'other note title', 'text': 'other note text' } ]",
"from notes.\"\"\" found = find_note(title) if not found: raise NoSuchNoteExists(title) del db['notes'][found[0]] def",
"update_note(find_title, _note) self.clear() self.set_status(204) self.finish(\"Note '{}' updated.\".format(new_title)) class NoteHandler(CorsBaseHandler): \"\"\"Handle /note/(.*) . /note/:title",
"from tornado_cors import CorsMixin import logging import json import os import signal import",
"found!\".format(title)) return response = found[1] self.write(response) def delete(self, title): \"\"\"Handle delete and delete",
"def update_note(title, note): \"\"\"Update an existing note with a given title, possibly retitling",
"NotesSearchHandler), ] test_routes = [ (r\"/test/begin\", TestBeginHandler), (r\"/test/end\", TestEndHandler) ] if is_using_test_db(): routes.extend(test_routes)",
"read_db() def is_using_test_db(): \"\"\"Check if started with use test db flag.\"\"\" return \"--use-test-db\"",
"found = find_note(title) if not found: self.clear() self.set_status(404) self.finish(\"Note '{}'' not found!\".format(title)) return",
"CORS and allow separate origin for the client.\"\"\" CORS_ORIGIN = 'http://localhost:8080' CORS_METHODS =",
"give note.\"\"\" note = json.loads(self.request.body.decode('utf-8')) title_update = note[\"title\"] if isinstance(title_update, dict): find_title =",
"= ( 'Access-Control-Allow-Headers, ' 'Origin, ' 'Accept, ' 'X-Requested-With, ' 'Content-Type, ' 'Access-Control-Request-Method,",
"tornado.\"\"\" global is_closing logging.info('exiting...') is_closing = True def try_exit(): \"\"\"Try closing tornado.\"\"\" global",
"{ 'notes': db['notes'] } else: response = { 'notes': search_notes(self.get_argument('q')) } self.write(response) class",
"self.set_status(404) self.finish(\"Note '{}' does not even exist.\".format(title)) class NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle /notes/titles .\"\"\" def",
"'text': 'some note text' }, { 'title': 'other note title', 'text': 'other note",
"for the Notes App frontend.\"\"\" import tornado.escape import tornado.ioloop import tornado.web import tornado.escape",
"search_notes(query): \"\"\"Search notes by query.\"\"\" def match_token(note, tokens): \"\"\"Test if note contains any",
".\"\"\" def get(self): \"\"\"Setup test to have end with expected state afterwards.\"\"\" read_db()",
"the Notes App frontend.\"\"\" import tornado.escape import tornado.ioloop import tornado.web import tornado.escape from",
"NoSuchNoteExists(title) note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"] db['notes'][found[0]] = note def find_note(title): \"\"\"Return (index, note) of",
"routes = [ (r\"/version\", VersionRootHandler), (r\"/notes\", NotesRootHandler), (r\"/notes/titles\", NotesTitlesHandler), (r\"/note/(.*)\", NoteHandler), (r\"/search\", NotesSearchHandler),",
"query.\"\"\" def match_token(note, tokens): \"\"\"Test if note contains any of the tokens. A",
"of the tokens. A very simple implementation still. Return False if any of",
"is_using_test_db(): \"\"\"Check if started with use test db flag.\"\"\" return \"--use-test-db\" in sys.argv",
"__init__(self, title): \"\"\"Show exception with the note title.\"\"\" super(NoteAlreadyExists, self).__init__(title) class NoSuchNoteExists(Exception): \"\"\"Raised",
"\"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to have end with expected state",
"\"\"\"Handle /version .\"\"\" def get(self): \"\"\"Handle get and return verision and api_version.\"\"\" response",
"self.write(response) def delete(self, title): \"\"\"Handle delete and delete note with given title from",
"frame): \"\"\"Signal handler for closing tornado.\"\"\" global is_closing logging.info('exiting...') is_closing = True def",
"start(): \"\"\"Start tornado.\"\"\" logging.info(\"Starting server...\") read_db() signal.signal(signal.SIGINT, signal_handler) application.listen(PORT) tornado.ioloop.PeriodicCallback(try_exit, 500).start() tornado.ioloop.IOLoop.instance().start() logging.info(\"Server",
"'X-Requested-With, ' 'Content-Type, ' 'Access-Control-Request-Method, ' 'Access-Control-Request-Headers' ) class VersionRootHandler(CorsBaseHandler): \"\"\"Handle /version .\"\"\"",
"db['notes'] } self.write(response) def put(self, *args, **kwargs): \"\"\"Handle put and create / update",
"(r\"/test/begin\", TestBeginHandler), (r\"/test/end\", TestEndHandler) ] if is_using_test_db(): routes.extend(test_routes) application = tornado.web.Application(routes) def read_db():",
"global is_closing if is_closing: # clean up here tornado.ioloop.IOLoop.instance().stop() logging.info('exit success') def start():",
"= note def find_note(title): \"\"\"Return (index, note) of note that has title or",
"notes by query.\"\"\" def match_token(note, tokens): \"\"\"Test if note contains any of the",
"notes.\"\"\" found = find_note(title) if not found: raise NoSuchNoteExists(title) del db['notes'][found[0]] def update_note(title,",
"= note[\"title\"] + \" \" + note[\"text\"] if token not in s.lower(): return",
"tokens): \"\"\"Test if note contains any of the tokens. A very simple implementation",
"\"\"\"Handle get and return verision and api_version.\"\"\" response = { 'version': '0.0.1', 'api_version':",
"\"\"\"Signal handler for closing tornado.\"\"\" global is_closing logging.info('exiting...') is_closing = True def try_exit():",
"response = { 'notes': db['notes'] } else: response = { 'notes': search_notes(self.get_argument('q')) }",
"found = find_note(title) if not found: raise NoSuchNoteExists(title) note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"] db['notes'][found[0]] =",
"if note[\"title\"] == title: return i, note return False def search_notes(query): \"\"\"Search notes",
"TEST_DB_PATH logging.info(\"server path:\", os.path.abspath(__file__)) logging.info(\"server: db_path:\", db_path) with open(db_path) as f: db =",
"note from notes.\"\"\" found = find_note(title) if not found: raise NoSuchNoteExists(title) del db['notes'][found[0]]",
"string into tokens.\"\"\" return [p.lower() for p in s.split(\" \") if p] class",
"+ note[\"text\"] if token not in s.lower(): return False tokens_found.append(token) return len(tokens_found) ==",
"NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle /notes/titles .\"\"\" def get(self): \"\"\"Handle get and return all note titles",
"in sys.argv routes = [ (r\"/version\", VersionRootHandler), (r\"/notes\", NotesRootHandler), (r\"/notes/titles\", NotesTitlesHandler), (r\"/note/(.*)\", NoteHandler),",
"= 'http://localhost:8080' CORS_METHODS = 'GET, PUT, DELETE' CORS_HEADERS = ( 'Access-Control-Allow-Headers, ' 'Origin,",
"VersionRootHandler(CorsBaseHandler): \"\"\"Handle /version .\"\"\" def get(self): \"\"\"Handle get and return verision and api_version.\"\"\"",
"class VersionRootHandler(CorsBaseHandler): \"\"\"Handle /version .\"\"\" def get(self): \"\"\"Handle get and return verision and",
"note with given title from database.\"\"\" found = find_note(title) if not found: self.clear()",
"NoteHandler(CorsBaseHandler): \"\"\"Handle /note/(.*) . /note/:title GET DELETE \"\"\" def get(self, title): \"\"\"Handle get",
"title from database.\"\"\" found = find_note(title) if not found: self.clear() self.set_status(404) self.finish(\"Note '{}''",
"is already taken.\"\"\" def __init__(self, title): \"\"\"Show exception with the note title.\"\"\" super(NoteAlreadyExists,",
"\"\"\"Setup test to have end with expected state afterwards.\"\"\" read_db() def is_using_test_db(): \"\"\"Check",
"expected state afterwards.\"\"\" read_db() def is_using_test_db(): \"\"\"Check if started with use test db",
"p] class NoteAlreadyExists(Exception): \"\"\"Raised if trying to add a new note with title",
"routes.extend(test_routes) application = tornado.web.Application(routes) def read_db(): \"\"\"'Read in' database for use.\"\"\" global db",
"database.\"\"\" response = { 'note_titles': [note[\"title\"] for note in db['notes']] } self.write(response) class",
"tornado.web.Application(routes) def read_db(): \"\"\"'Read in' database for use.\"\"\" global db db_path = DB_PATH",
"\"\"\"Return (index, note) of note that has title or False if no such",
"find_note(note[\"title\"]): raise NoteAlreadyExists(note[\"title\"]) db['notes'].append(note) def delete_note(title): \"\"\"Delete note from notes.\"\"\" found = find_note(title)",
"tokens: s = note[\"title\"] + \" \" + note[\"text\"] if token not in",
"not found: raise NoSuchNoteExists(title) del db['notes'][found[0]] def update_note(title, note): \"\"\"Update an existing note",
"\"\"\"Update an existing note with a given title, possibly retitling it.\"\"\" found =",
"return note with given title from database.\"\"\" found = find_note(title) if not found:",
"in db['notes']] } self.write(response) class NotesSearchHandler(CorsBaseHandler): \"\"\"Handle /search?q=(.*) .\"\"\" def get(self): \"\"\"Handle get",
"test db flag.\"\"\" return \"--use-test-db\" in sys.argv routes = [ (r\"/version\", VersionRootHandler), (r\"/notes\",",
"title', 'text': 'some note text' }, { 'title': 'other note title', 'text': 'other",
"note.\"\"\" note = json.loads(self.request.body.decode('utf-8')) title_update = note[\"title\"] if isinstance(title_update, dict): find_title = title_update[\"old\"]",
"find_note(title) if not found: raise NoSuchNoteExists(title) note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"] db['notes'][found[0]] = note def",
"json import os import signal import sys PORT = 3456 DB_PATH = \"db.json\"",
"'other note title', 'text': 'other note text' } ] } def tokenize(s): \"\"\"Split",
"/note/(.*) . /note/:title GET DELETE \"\"\" def get(self, title): \"\"\"Handle get and return",
"class NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle /notes/titles .\"\"\" def get(self): \"\"\"Handle get and return all note",
"with the note title.\"\"\" super(NoteAlreadyExists, self).__init__(title) class NoSuchNoteExists(Exception): \"\"\"Raised if trying to delete",
"and delete note with given title from database.\"\"\" try: delete_note(title) except NoSuchNoteExists: self.clear()",
"self.write(response) class NotesRootHandler(CorsBaseHandler): \"\"\"Handle /notes .\"\"\" def get(self): \"\"\"Handle get and return all",
"return False def search_notes(query): \"\"\"Search notes by query.\"\"\" def match_token(note, tokens): \"\"\"Test if",
"with expected state afterwards.\"\"\" read_db() def is_using_test_db(): \"\"\"Check if started with use test",
"exception with the note title.\"\"\" super(NoteAlreadyExists, self).__init__(title) class NoSuchNoteExists(Exception): \"\"\"Raised if trying to",
"for token in tokens: s = note[\"title\"] + \" \" + note[\"text\"] if",
"def find_note(title): \"\"\"Return (index, note) of note that has title or False if",
"return all notes matching search query.\"\"\" response = { 'notes': [] } if",
"self).__init__(title) class NoSuchNoteExists(Exception): \"\"\"Raised if trying to delete a note that doesn't exist.\"\"\"",
"return \"--use-test-db\" in sys.argv routes = [ (r\"/version\", VersionRootHandler), (r\"/notes\", NotesRootHandler), (r\"/notes/titles\", NotesTitlesHandler),",
"= json.load(f) is_closing = False def signal_handler(signum, frame): \"\"\"Signal handler for closing tornado.\"\"\"",
"create / update give note.\"\"\" note = json.loads(self.request.body.decode('utf-8')) title_update = note[\"title\"] if isinstance(title_update,",
"handler for closing tornado.\"\"\" global is_closing logging.info('exiting...') is_closing = True def try_exit(): \"\"\"Try",
"self.finish(\"Note '{}' does not even exist.\".format(title)) class NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle /notes/titles .\"\"\" def get(self):",
"VersionRootHandler), (r\"/notes\", NotesRootHandler), (r\"/notes/titles\", NotesTitlesHandler), (r\"/note/(.*)\", NoteHandler), (r\"/search\", NotesSearchHandler), ] test_routes = [",
".\"\"\" def get(self): \"\"\"Setup test to have expected state.\"\"\" read_db() class TestEndHandler(CorsBaseHandler): \"\"\"Handle",
"dict): find_title = title_update[\"old\"] new_title = title_update[\"new\"] else: find_title = title_update new_title =",
"open(db_path) as f: db = json.load(f) is_closing = False def signal_handler(signum, frame): \"\"\"Signal",
"note return False def search_notes(query): \"\"\"Search notes by query.\"\"\" def match_token(note, tokens): \"\"\"Test",
"NotesSearchHandler(CorsBaseHandler): \"\"\"Handle /search?q=(.*) .\"\"\" def get(self): \"\"\"Handle get and return all notes matching",
"update_note(title, note): \"\"\"Update an existing note with a given title, possibly retitling it.\"\"\"",
"state afterwards.\"\"\" read_db() def is_using_test_db(): \"\"\"Check if started with use test db flag.\"\"\"",
"note[\"text\"] if token not in s.lower(): return False tokens_found.append(token) return len(tokens_found) == len(tokens)",
"CorsMixin import logging import json import os import signal import sys PORT =",
"DELETE' CORS_HEADERS = ( 'Access-Control-Allow-Headers, ' 'Origin, ' 'Accept, ' 'X-Requested-With, ' 'Content-Type,",
"'0.0.1', 'api_version': '0.1', 'is_test_db': True } self.write(response) class NotesRootHandler(CorsBaseHandler): \"\"\"Handle /notes .\"\"\" def",
"global db db_path = DB_PATH if is_using_test_db(): db_path = TEST_DB_PATH logging.info(\"server path:\", os.path.abspath(__file__))",
"isinstance(title_update, dict): find_title = title_update[\"old\"] new_title = title_update[\"new\"] else: find_title = title_update new_title",
"given title from database.\"\"\" found = find_note(title) if not found: self.clear() self.set_status(404) self.finish(\"Note",
"db['notes'].append(note) def delete_note(title): \"\"\"Delete note from notes.\"\"\" found = find_note(title) if not found:",
"that doesn't exist.\"\"\" def __init__(self, title): \"\"\"Show exception with the note title.\"\"\" super(NoSuchNoteExists,",
"the note title.\"\"\" super(NoSuchNoteExists, self).__init__(title) def add_note(note): \"\"\"Add note to notes.\"\"\" if find_note(note[\"title\"]):",
"found = find_note(title) if not found: raise NoSuchNoteExists(title) del db['notes'][found[0]] def update_note(title, note):",
"get(self): \"\"\"Handle get and return all notes from database.\"\"\" response = { 'notes':",
"found: raise NoSuchNoteExists(title) note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"] db['notes'][found[0]] = note def find_note(title): \"\"\"Return (index,",
"database.\"\"\" try: delete_note(title) except NoSuchNoteExists: self.clear() self.set_status(404) self.finish(\"Note '{}' does not even exist.\".format(title))",
"not even exist.\".format(title)) class NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle /notes/titles .\"\"\" def get(self): \"\"\"Handle get and",
"(r\"/notes/titles\", NotesTitlesHandler), (r\"/note/(.*)\", NoteHandler), (r\"/search\", NotesSearchHandler), ] test_routes = [ (r\"/test/begin\", TestBeginHandler), (r\"/test/end\",",
"client.\"\"\" CORS_ORIGIN = 'http://localhost:8080' CORS_METHODS = 'GET, PUT, DELETE' CORS_HEADERS = ( 'Access-Control-Allow-Headers,",
"{ 'title': 'other note title', 'text': 'other note text' } ] } def",
"'text': 'other note text' } ] } def tokenize(s): \"\"\"Split string into tokens.\"\"\"",
"def get(self): \"\"\"Handle get and return all notes matching search query.\"\"\" response =",
"def is_using_test_db(): \"\"\"Check if started with use test db flag.\"\"\" return \"--use-test-db\" in",
"} ] } def tokenize(s): \"\"\"Split string into tokens.\"\"\" return [p.lower() for p",
"delete(self, title): \"\"\"Handle delete and delete note with given title from database.\"\"\" try:",
"to have end with expected state afterwards.\"\"\" read_db() def is_using_test_db(): \"\"\"Check if started",
"api_version.\"\"\" response = { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True } self.write(response) class",
"} self.write(response) class TestBeginHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to have",
"closing tornado.\"\"\" global is_closing logging.info('exiting...') is_closing = True def try_exit(): \"\"\"Try closing tornado.\"\"\"",
"database.\"\"\" response = { 'notes': db['notes'] } self.write(response) def put(self, *args, **kwargs): \"\"\"Handle",
"use.\"\"\" global db db_path = DB_PATH if is_using_test_db(): db_path = TEST_DB_PATH logging.info(\"server path:\",",
"closing tornado.\"\"\" global is_closing if is_closing: # clean up here tornado.ioloop.IOLoop.instance().stop() logging.info('exit success')",
"self.clear() self.set_status(204) self.finish(\"Note '{}' updated.\".format(new_title)) class NoteHandler(CorsBaseHandler): \"\"\"Handle /note/(.*) . /note/:title GET DELETE",
"class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler): \"\"\"Set up CORS and allow separate origin for the client.\"\"\"",
"title): \"\"\"Handle delete and delete note with given title from database.\"\"\" try: delete_note(title)",
"response = { 'notes': search_notes(self.get_argument('q')) } self.write(response) class TestBeginHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def",
"{ 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True }, 'notes': [ { 'title': 'some",
"\"\"\" def get(self, title): \"\"\"Handle get and return note with given title from",
"'title': 'other note title', 'text': 'other note text' } ] } def tokenize(s):",
"super(NoSuchNoteExists, self).__init__(title) def add_note(note): \"\"\"Add note to notes.\"\"\" if find_note(note[\"title\"]): raise NoteAlreadyExists(note[\"title\"]) db['notes'].append(note)",
"a given title, possibly retitling it.\"\"\" found = find_note(title) if not found: raise",
"a REST API for the Notes App frontend.\"\"\" import tornado.escape import tornado.ioloop import",
"\"db.json\" TEST_DB_PATH = \"test/test_db.json\" db = { 'version': { 'version': '0.0.1', 'api_version': '0.1',",
"query_tokens = tokenize(query) for note in db['notes']: if match_token(note, query_tokens): notes.append(note) return notes",
"self.clear() self.set_status(404) self.finish(\"Note '{}' does not even exist.\".format(title)) class NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle /notes/titles .\"\"\"",
".\"\"\" def get(self): \"\"\"Handle get and return all notes matching search query.\"\"\" response",
"exist.\".format(title)) class NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle /notes/titles .\"\"\" def get(self): \"\"\"Handle get and return all",
"\"\"\"Search notes by query.\"\"\" def match_token(note, tokens): \"\"\"Test if note contains any of",
"db['notes'][found[0]] def update_note(title, note): \"\"\"Update an existing note with a given title, possibly",
"a note that doesn't exist.\"\"\" def __init__(self, title): \"\"\"Show exception with the note",
"get(self): \"\"\"Setup test to have expected state.\"\"\" read_db() class TestEndHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\"",
"[ (r\"/test/begin\", TestBeginHandler), (r\"/test/end\", TestEndHandler) ] if is_using_test_db(): routes.extend(test_routes) application = tornado.web.Application(routes) def",
"{ 'title': 'some note title', 'text': 'some note text' }, { 'title': 'other",
"match. \"\"\" tokens_found = [] for token in tokens: s = note[\"title\"] +",
"\"\"\"Handle /search?q=(.*) .\"\"\" def get(self): \"\"\"Handle get and return all notes matching search",
"db = { 'version': { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True }, 'notes':",
"in s.split(\" \") if p] class NoteAlreadyExists(Exception): \"\"\"Raised if trying to add a",
"def delete_note(title): \"\"\"Delete note from notes.\"\"\" found = find_note(title) if not found: raise",
"' 'Access-Control-Request-Headers' ) class VersionRootHandler(CorsBaseHandler): \"\"\"Handle /version .\"\"\" def get(self): \"\"\"Handle get and",
"read_db(): \"\"\"'Read in' database for use.\"\"\" global db db_path = DB_PATH if is_using_test_db():",
"tornado.escape import tornado.ioloop import tornado.web import tornado.escape from tornado_cors import CorsMixin import logging",
"'title': 'some note title', 'text': 'some note text' }, { 'title': 'other note",
"\"\"\"Raised if trying to add a new note with title that is already",
"json.loads(self.request.body.decode('utf-8')) title_update = note[\"title\"] if isinstance(title_update, dict): find_title = title_update[\"old\"] new_title = title_update[\"new\"]",
"} if self.get_argument('q') == \"\": response = { 'notes': db['notes'] } else: response",
"else: find_title = title_update new_title = title_update _note = { 'title': new_title, 'text':",
"if match_token(note, query_tokens): notes.append(note) return notes class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler): \"\"\"Set up CORS and",
"get and return all notes from database.\"\"\" response = { 'notes': db['notes'] }",
"= False def signal_handler(signum, frame): \"\"\"Signal handler for closing tornado.\"\"\" global is_closing logging.info('exiting...')",
"tokens. A very simple implementation still. Return False if any of the tokens",
"\"\"\"Handle put and create / update give note.\"\"\" note = json.loads(self.request.body.decode('utf-8')) title_update =",
"super(NoteAlreadyExists, self).__init__(title) class NoSuchNoteExists(Exception): \"\"\"Raised if trying to delete a note that doesn't",
"= note[\"title\"] if isinstance(title_update, dict): find_title = title_update[\"old\"] new_title = title_update[\"new\"] else: find_title",
"title): \"\"\"Show exception with the note title.\"\"\" super(NoteAlreadyExists, self).__init__(title) class NoSuchNoteExists(Exception): \"\"\"Raised if",
"class NotesSearchHandler(CorsBaseHandler): \"\"\"Handle /search?q=(.*) .\"\"\" def get(self): \"\"\"Handle get and return all notes",
"= { 'notes': search_notes(self.get_argument('q')) } self.write(response) class TestBeginHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self):",
"signal.signal(signal.SIGINT, signal_handler) application.listen(PORT) tornado.ioloop.PeriodicCallback(try_exit, 500).start() tornado.ioloop.IOLoop.instance().start() logging.info(\"Server stopped.\") if __name__ == \"__main__\": start()",
"def match_token(note, tokens): \"\"\"Test if note contains any of the tokens. A very",
"\"\"\"Start tornado.\"\"\" logging.info(\"Starting server...\") read_db() signal.signal(signal.SIGINT, signal_handler) application.listen(PORT) tornado.ioloop.PeriodicCallback(try_exit, 500).start() tornado.ioloop.IOLoop.instance().start() logging.info(\"Server stopped.\")",
"= [] for token in tokens: s = note[\"title\"] + \" \" +",
"} found = find_note(find_title) if not found: add_note(_note) self.clear() self.set_status(200) self.finish(\"Note '{}' added.\".format(find_title))",
"db['notes']] } self.write(response) class NotesSearchHandler(CorsBaseHandler): \"\"\"Handle /search?q=(.*) .\"\"\" def get(self): \"\"\"Handle get and",
"[] } if self.get_argument('q') == \"\": response = { 'notes': db['notes'] } else:",
"doesn't exist.\"\"\" def __init__(self, title): \"\"\"Show exception with the note title.\"\"\" super(NoSuchNoteExists, self).__init__(title)",
"A very simple implementation still. Return False if any of the tokens is",
"= title_update[\"new\"] else: find_title = title_update new_title = title_update _note = { 'title':",
"found[1] self.write(response) def delete(self, title): \"\"\"Handle delete and delete note with given title",
"} else: response = { 'notes': search_notes(self.get_argument('q')) } self.write(response) class TestBeginHandler(CorsBaseHandler): \"\"\"Handle /test/begin",
"import signal import sys PORT = 3456 DB_PATH = \"db.json\" TEST_DB_PATH = \"test/test_db.json\"",
"NotesRootHandler(CorsBaseHandler): \"\"\"Handle /notes .\"\"\" def get(self): \"\"\"Handle get and return all notes from",
"s.lower(): return False tokens_found.append(token) return len(tokens_found) == len(tokens) notes = [] query_tokens =",
"} self.write(response) class NotesSearchHandler(CorsBaseHandler): \"\"\"Handle /search?q=(.*) .\"\"\" def get(self): \"\"\"Handle get and return",
"== \"\": response = { 'notes': db['notes'] } else: response = { 'notes':",
"(r\"/version\", VersionRootHandler), (r\"/notes\", NotesRootHandler), (r\"/notes/titles\", NotesTitlesHandler), (r\"/note/(.*)\", NoteHandler), (r\"/search\", NotesSearchHandler), ] test_routes =",
"sys.argv routes = [ (r\"/version\", VersionRootHandler), (r\"/notes\", NotesRootHandler), (r\"/notes/titles\", NotesTitlesHandler), (r\"/note/(.*)\", NoteHandler), (r\"/search\",",
".\"\"\" def get(self): \"\"\"Handle get and return all notes from database.\"\"\" response =",
"here tornado.ioloop.IOLoop.instance().stop() logging.info('exit success') def start(): \"\"\"Start tornado.\"\"\" logging.info(\"Starting server...\") read_db() signal.signal(signal.SIGINT, signal_handler)",
"note with a given title, possibly retitling it.\"\"\" found = find_note(title) if not",
"logging.info('exiting...') is_closing = True def try_exit(): \"\"\"Try closing tornado.\"\"\" global is_closing if is_closing:",
"\"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to have expected state.\"\"\" read_db() class",
"\"\"\"A simple server with a REST API for the Notes App frontend.\"\"\" import",
"raise NoSuchNoteExists(title) del db['notes'][found[0]] def update_note(title, note): \"\"\"Update an existing note with a",
"Return False if any of the tokens is missing, True if any match.",
"3456 DB_PATH = \"db.json\" TEST_DB_PATH = \"test/test_db.json\" db = { 'version': { 'version':",
"simple implementation still. Return False if any of the tokens is missing, True",
"search_notes(self.get_argument('q')) } self.write(response) class TestBeginHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to",
"response = { 'note_titles': [note[\"title\"] for note in db['notes']] } self.write(response) class NotesSearchHandler(CorsBaseHandler):",
"and return verision and api_version.\"\"\" response = { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db':",
"contains any of the tokens. A very simple implementation still. Return False if",
"class NoSuchNoteExists(Exception): \"\"\"Raised if trying to delete a note that doesn't exist.\"\"\" def",
"path:\", os.path.abspath(__file__)) logging.info(\"server: db_path:\", db_path) with open(db_path) as f: db = json.load(f) is_closing",
"*args, **kwargs): \"\"\"Handle put and create / update give note.\"\"\" note = json.loads(self.request.body.decode('utf-8'))",
"self.get_argument('q') == \"\": response = { 'notes': db['notes'] } else: response = {",
"delete_note(title) except NoSuchNoteExists: self.clear() self.set_status(404) self.finish(\"Note '{}' does not even exist.\".format(title)) class NotesTitlesHandler(CorsBaseHandler):",
"db['notes'] } else: response = { 'notes': search_notes(self.get_argument('q')) } self.write(response) class TestBeginHandler(CorsBaseHandler): \"\"\"Handle",
"notes class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler): \"\"\"Set up CORS and allow separate origin for the",
"s = note[\"title\"] + \" \" + note[\"text\"] if token not in s.lower():",
"by query.\"\"\" def match_token(note, tokens): \"\"\"Test if note contains any of the tokens.",
"update give note.\"\"\" note = json.loads(self.request.body.decode('utf-8')) title_update = note[\"title\"] if isinstance(title_update, dict): find_title",
"tokens_found.append(token) return len(tokens_found) == len(tokens) notes = [] query_tokens = tokenize(query) for note",
"'{}'' not found!\".format(title)) return response = found[1] self.write(response) def delete(self, title): \"\"\"Handle delete",
"if self.get_argument('q') == \"\": response = { 'notes': db['notes'] } else: response =",
"to have expected state.\"\"\" read_db() class TestEndHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup",
"db_path:\", db_path) with open(db_path) as f: db = json.load(f) is_closing = False def",
"in s.lower(): return False tokens_found.append(token) return len(tokens_found) == len(tokens) notes = [] query_tokens",
"get and return all note titles from database.\"\"\" response = { 'note_titles': [note[\"title\"]",
"App frontend.\"\"\" import tornado.escape import tornado.ioloop import tornado.web import tornado.escape from tornado_cors import",
"any of the tokens. A very simple implementation still. Return False if any",
"note to notes.\"\"\" if find_note(note[\"title\"]): raise NoteAlreadyExists(note[\"title\"]) db['notes'].append(note) def delete_note(title): \"\"\"Delete note from",
"and allow separate origin for the client.\"\"\" CORS_ORIGIN = 'http://localhost:8080' CORS_METHODS = 'GET,",
"\"\"\"Handle get and return all note titles from database.\"\"\" response = { 'note_titles':",
"notes.\"\"\" if find_note(note[\"title\"]): raise NoteAlreadyExists(note[\"title\"]) db['notes'].append(note) def delete_note(title): \"\"\"Delete note from notes.\"\"\" found",
"found: self.clear() self.set_status(404) self.finish(\"Note '{}'' not found!\".format(title)) return response = found[1] self.write(response) def",
"all notes matching search query.\"\"\" response = { 'notes': [] } if self.get_argument('q')",
"\"\"\"'Read in' database for use.\"\"\" global db db_path = DB_PATH if is_using_test_db(): db_path",
".\"\"\" def get(self): \"\"\"Handle get and return all note titles from database.\"\"\" response",
"CORS_METHODS = 'GET, PUT, DELETE' CORS_HEADERS = ( 'Access-Control-Allow-Headers, ' 'Origin, ' 'Accept,",
"have end with expected state afterwards.\"\"\" read_db() def is_using_test_db(): \"\"\"Check if started with",
"'is_test_db': True } self.write(response) class NotesRootHandler(CorsBaseHandler): \"\"\"Handle /notes .\"\"\" def get(self): \"\"\"Handle get",
"does not even exist.\".format(title)) class NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle /notes/titles .\"\"\" def get(self): \"\"\"Handle get",
"except NoSuchNoteExists: self.clear() self.set_status(404) self.finish(\"Note '{}' does not even exist.\".format(title)) class NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle",
"p in s.split(\" \") if p] class NoteAlreadyExists(Exception): \"\"\"Raised if trying to add",
"self.finish(\"Note '{}' added.\".format(find_title)) else: update_note(find_title, _note) self.clear() self.set_status(204) self.finish(\"Note '{}' updated.\".format(new_title)) class NoteHandler(CorsBaseHandler):",
"/notes .\"\"\" def get(self): \"\"\"Handle get and return all notes from database.\"\"\" response",
"NoSuchNoteExists(Exception): \"\"\"Raised if trying to delete a note that doesn't exist.\"\"\" def __init__(self,",
"/ update give note.\"\"\" note = json.loads(self.request.body.decode('utf-8')) title_update = note[\"title\"] if isinstance(title_update, dict):",
"def get(self): \"\"\"Setup test to have end with expected state afterwards.\"\"\" read_db() def",
"return [p.lower() for p in s.split(\" \") if p] class NoteAlreadyExists(Exception): \"\"\"Raised if",
"s.split(\" \") if p] class NoteAlreadyExists(Exception): \"\"\"Raised if trying to add a new",
"try_exit(): \"\"\"Try closing tornado.\"\"\" global is_closing if is_closing: # clean up here tornado.ioloop.IOLoop.instance().stop()",
"get(self): \"\"\"Setup test to have end with expected state afterwards.\"\"\" read_db() def is_using_test_db():",
"logging.info('exit success') def start(): \"\"\"Start tornado.\"\"\" logging.info(\"Starting server...\") read_db() signal.signal(signal.SIGINT, signal_handler) application.listen(PORT) tornado.ioloop.PeriodicCallback(try_exit,",
"db_path = DB_PATH if is_using_test_db(): db_path = TEST_DB_PATH logging.info(\"server path:\", os.path.abspath(__file__)) logging.info(\"server: db_path:\",",
"= True def try_exit(): \"\"\"Try closing tornado.\"\"\" global is_closing if is_closing: # clean",
"separate origin for the client.\"\"\" CORS_ORIGIN = 'http://localhost:8080' CORS_METHODS = 'GET, PUT, DELETE'",
"\"\"\"Test if note contains any of the tokens. A very simple implementation still.",
"title_update[\"new\"] else: find_title = title_update new_title = title_update _note = { 'title': new_title,",
"retitling it.\"\"\" found = find_note(title) if not found: raise NoSuchNoteExists(title) note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"]",
"title, possibly retitling it.\"\"\" found = find_note(title) if not found: raise NoSuchNoteExists(title) note[\"timestamp\"][\"created\"]",
"title_update[\"old\"] new_title = title_update[\"new\"] else: find_title = title_update new_title = title_update _note =",
"def signal_handler(signum, frame): \"\"\"Signal handler for closing tornado.\"\"\" global is_closing logging.info('exiting...') is_closing =",
"a new note with title that is already taken.\"\"\" def __init__(self, title): \"\"\"Show",
"if note contains any of the tokens. A very simple implementation still. Return",
"sys PORT = 3456 DB_PATH = \"db.json\" TEST_DB_PATH = \"test/test_db.json\" db = {",
"raise NoSuchNoteExists(title) note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"] db['notes'][found[0]] = note def find_note(title): \"\"\"Return (index, note)",
"__init__(self, title): \"\"\"Show exception with the note title.\"\"\" super(NoSuchNoteExists, self).__init__(title) def add_note(note): \"\"\"Add",
"class TestEndHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to have end with",
"get and return verision and api_version.\"\"\" response = { 'version': '0.0.1', 'api_version': '0.1',",
"REST API for the Notes App frontend.\"\"\" import tornado.escape import tornado.ioloop import tornado.web",
"find_note(title) if not found: raise NoSuchNoteExists(title) del db['notes'][found[0]] def update_note(title, note): \"\"\"Update an",
"not found: raise NoSuchNoteExists(title) note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"] db['notes'][found[0]] = note def find_note(title): \"\"\"Return",
"TestBeginHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to have expected state.\"\"\" read_db()",
"[] for token in tokens: s = note[\"title\"] + \" \" + note[\"text\"]",
"tokenize(s): \"\"\"Split string into tokens.\"\"\" return [p.lower() for p in s.split(\" \") if",
"\"\"\"Add note to notes.\"\"\" if find_note(note[\"title\"]): raise NoteAlreadyExists(note[\"title\"]) db['notes'].append(note) def delete_note(title): \"\"\"Delete note",
"def get(self): \"\"\"Handle get and return verision and api_version.\"\"\" response = { 'version':",
"def __init__(self, title): \"\"\"Show exception with the note title.\"\"\" super(NoteAlreadyExists, self).__init__(title) class NoSuchNoteExists(Exception):",
"self.set_status(204) self.finish(\"Note '{}' updated.\".format(new_title)) class NoteHandler(CorsBaseHandler): \"\"\"Handle /note/(.*) . /note/:title GET DELETE \"\"\"",
"\"test/test_db.json\" db = { 'version': { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True },",
"it.\"\"\" found = find_note(title) if not found: raise NoSuchNoteExists(title) note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"] db['notes'][found[0]]",
"has title or False if no such note.\"\"\" for i, note in enumerate(db['notes']):",
"GET DELETE \"\"\" def get(self, title): \"\"\"Handle get and return note with given",
"json.load(f) is_closing = False def signal_handler(signum, frame): \"\"\"Signal handler for closing tornado.\"\"\" global",
"\"\"\"Split string into tokens.\"\"\" return [p.lower() for p in s.split(\" \") if p]",
"[] query_tokens = tokenize(query) for note in db['notes']: if match_token(note, query_tokens): notes.append(note) return",
"trying to add a new note with title that is already taken.\"\"\" def",
"\"\"\"Raised if trying to delete a note that doesn't exist.\"\"\" def __init__(self, title):",
"False if any of the tokens is missing, True if any match. \"\"\"",
"import tornado.web import tornado.escape from tornado_cors import CorsMixin import logging import json import",
"'notes': [ { 'title': 'some note title', 'text': 'some note text' }, {",
"True def try_exit(): \"\"\"Try closing tornado.\"\"\" global is_closing if is_closing: # clean up",
"{ 'version': { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True }, 'notes': [ {",
"= { 'notes': db['notes'] } self.write(response) def put(self, *args, **kwargs): \"\"\"Handle put and",
"self.set_status(200) self.finish(\"Note '{}' added.\".format(find_title)) else: update_note(find_title, _note) self.clear() self.set_status(204) self.finish(\"Note '{}' updated.\".format(new_title)) class",
"for the client.\"\"\" CORS_ORIGIN = 'http://localhost:8080' CORS_METHODS = 'GET, PUT, DELETE' CORS_HEADERS =",
"logging.info(\"server: db_path:\", db_path) with open(db_path) as f: db = json.load(f) is_closing = False",
"tokenize(query) for note in db['notes']: if match_token(note, query_tokens): notes.append(note) return notes class CorsBaseHandler(CorsMixin,",
"'is_test_db': True }, 'notes': [ { 'title': 'some note title', 'text': 'some note",
"PUT, DELETE' CORS_HEADERS = ( 'Access-Control-Allow-Headers, ' 'Origin, ' 'Accept, ' 'X-Requested-With, '",
"'api_version': '0.1', 'is_test_db': True } self.write(response) class NotesRootHandler(CorsBaseHandler): \"\"\"Handle /notes .\"\"\" def get(self):",
"the note title.\"\"\" super(NoteAlreadyExists, self).__init__(title) class NoSuchNoteExists(Exception): \"\"\"Raised if trying to delete a",
"new note with title that is already taken.\"\"\" def __init__(self, title): \"\"\"Show exception",
"= title_update[\"old\"] new_title = title_update[\"new\"] else: find_title = title_update new_title = title_update _note",
"very simple implementation still. Return False if any of the tokens is missing,",
"note def find_note(title): \"\"\"Return (index, note) of note that has title or False",
"+ \" \" + note[\"text\"] if token not in s.lower(): return False tokens_found.append(token)",
"raise NoteAlreadyExists(note[\"title\"]) db['notes'].append(note) def delete_note(title): \"\"\"Delete note from notes.\"\"\" found = find_note(title) if",
"' 'Origin, ' 'Accept, ' 'X-Requested-With, ' 'Content-Type, ' 'Access-Control-Request-Method, ' 'Access-Control-Request-Headers' )",
"}, { 'title': 'other note title', 'text': 'other note text' } ] }",
"True if any match. \"\"\" tokens_found = [] for token in tokens: s",
"= json.loads(self.request.body.decode('utf-8')) title_update = note[\"title\"] if isinstance(title_update, dict): find_title = title_update[\"old\"] new_title =",
"\"\"\"Handle /note/(.*) . /note/:title GET DELETE \"\"\" def get(self, title): \"\"\"Handle get and",
"not found: add_note(_note) self.clear() self.set_status(200) self.finish(\"Note '{}' added.\".format(find_title)) else: update_note(find_title, _note) self.clear() self.set_status(204)",
"= [ (r\"/test/begin\", TestBeginHandler), (r\"/test/end\", TestEndHandler) ] if is_using_test_db(): routes.extend(test_routes) application = tornado.web.Application(routes)",
"or False if no such note.\"\"\" for i, note in enumerate(db['notes']): if note[\"title\"]",
"import CorsMixin import logging import json import os import signal import sys PORT",
"find_note(title): \"\"\"Return (index, note) of note that has title or False if no",
"no such note.\"\"\" for i, note in enumerate(db['notes']): if note[\"title\"] == title: return",
"note with given title from database.\"\"\" try: delete_note(title) except NoSuchNoteExists: self.clear() self.set_status(404) self.finish(\"Note",
"= TEST_DB_PATH logging.info(\"server path:\", os.path.abspath(__file__)) logging.info(\"server: db_path:\", db_path) with open(db_path) as f: db",
"is_closing = True def try_exit(): \"\"\"Try closing tornado.\"\"\" global is_closing if is_closing: #",
"' 'Accept, ' 'X-Requested-With, ' 'Content-Type, ' 'Access-Control-Request-Method, ' 'Access-Control-Request-Headers' ) class VersionRootHandler(CorsBaseHandler):",
"\"\"\" tokens_found = [] for token in tokens: s = note[\"title\"] + \"",
"application = tornado.web.Application(routes) def read_db(): \"\"\"'Read in' database for use.\"\"\" global db db_path",
"get and return all notes matching search query.\"\"\" response = { 'notes': []",
"= 'GET, PUT, DELETE' CORS_HEADERS = ( 'Access-Control-Allow-Headers, ' 'Origin, ' 'Accept, '",
"self.clear() self.set_status(404) self.finish(\"Note '{}'' not found!\".format(title)) return response = found[1] self.write(response) def delete(self,",
"tornado.\"\"\" global is_closing if is_closing: # clean up here tornado.ioloop.IOLoop.instance().stop() logging.info('exit success') def",
"def read_db(): \"\"\"'Read in' database for use.\"\"\" global db db_path = DB_PATH if",
"if token not in s.lower(): return False tokens_found.append(token) return len(tokens_found) == len(tokens) notes",
"{ 'title': new_title, 'text': note[\"text\"], 'timestamp': note[\"timestamp\"] } found = find_note(find_title) if not",
"note.\"\"\" for i, note in enumerate(db['notes']): if note[\"title\"] == title: return i, note",
"tornado_cors import CorsMixin import logging import json import os import signal import sys",
"self.finish(\"Note '{}'' not found!\".format(title)) return response = found[1] self.write(response) def delete(self, title): \"\"\"Handle",
"'Access-Control-Request-Method, ' 'Access-Control-Request-Headers' ) class VersionRootHandler(CorsBaseHandler): \"\"\"Handle /version .\"\"\" def get(self): \"\"\"Handle get",
"as f: db = json.load(f) is_closing = False def signal_handler(signum, frame): \"\"\"Signal handler",
"tokens_found = [] for token in tokens: s = note[\"title\"] + \" \"",
"in' database for use.\"\"\" global db db_path = DB_PATH if is_using_test_db(): db_path =",
"for i, note in enumerate(db['notes']): if note[\"title\"] == title: return i, note return",
"db flag.\"\"\" return \"--use-test-db\" in sys.argv routes = [ (r\"/version\", VersionRootHandler), (r\"/notes\", NotesRootHandler),",
"tornado.web.RequestHandler): \"\"\"Set up CORS and allow separate origin for the client.\"\"\" CORS_ORIGIN =",
"enumerate(db['notes']): if note[\"title\"] == title: return i, note return False def search_notes(query): \"\"\"Search",
"response = { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True } self.write(response) class NotesRootHandler(CorsBaseHandler):",
"end with expected state afterwards.\"\"\" read_db() def is_using_test_db(): \"\"\"Check if started with use",
"started with use test db flag.\"\"\" return \"--use-test-db\" in sys.argv routes = [",
"return i, note return False def search_notes(query): \"\"\"Search notes by query.\"\"\" def match_token(note,",
"note text' } ] } def tokenize(s): \"\"\"Split string into tokens.\"\"\" return [p.lower()",
"NoSuchNoteExists: self.clear() self.set_status(404) self.finish(\"Note '{}' does not even exist.\".format(title)) class NotesTitlesHandler(CorsBaseHandler): \"\"\"Handle /notes/titles",
"}, 'notes': [ { 'title': 'some note title', 'text': 'some note text' },",
"{ 'notes': search_notes(self.get_argument('q')) } self.write(response) class TestBeginHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup",
"True }, 'notes': [ { 'title': 'some note title', 'text': 'some note text'",
"read_db() class TestEndHandler(CorsBaseHandler): \"\"\"Handle /test/begin .\"\"\" def get(self): \"\"\"Setup test to have end",
"flag.\"\"\" return \"--use-test-db\" in sys.argv routes = [ (r\"/version\", VersionRootHandler), (r\"/notes\", NotesRootHandler), (r\"/notes/titles\",",
"note): \"\"\"Update an existing note with a given title, possibly retitling it.\"\"\" found",
"\" \" + note[\"text\"] if token not in s.lower(): return False tokens_found.append(token) return",
"NoteHandler), (r\"/search\", NotesSearchHandler), ] test_routes = [ (r\"/test/begin\", TestBeginHandler), (r\"/test/end\", TestEndHandler) ] if",
"TestEndHandler) ] if is_using_test_db(): routes.extend(test_routes) application = tornado.web.Application(routes) def read_db(): \"\"\"'Read in' database",
"is_closing logging.info('exiting...') is_closing = True def try_exit(): \"\"\"Try closing tornado.\"\"\" global is_closing if",
"if is_closing: # clean up here tornado.ioloop.IOLoop.instance().stop() logging.info('exit success') def start(): \"\"\"Start tornado.\"\"\"",
"len(tokens) notes = [] query_tokens = tokenize(query) for note in db['notes']: if match_token(note,",
"'version': { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True }, 'notes': [ { 'title':",
"NotesRootHandler), (r\"/notes/titles\", NotesTitlesHandler), (r\"/note/(.*)\", NoteHandler), (r\"/search\", NotesSearchHandler), ] test_routes = [ (r\"/test/begin\", TestBeginHandler),",
"' 'Content-Type, ' 'Access-Control-Request-Method, ' 'Access-Control-Request-Headers' ) class VersionRootHandler(CorsBaseHandler): \"\"\"Handle /version .\"\"\" def",
"self.finish(\"Note '{}' updated.\".format(new_title)) class NoteHandler(CorsBaseHandler): \"\"\"Handle /note/(.*) . /note/:title GET DELETE \"\"\" def",
"= 3456 DB_PATH = \"db.json\" TEST_DB_PATH = \"test/test_db.json\" db = { 'version': {",
"import json import os import signal import sys PORT = 3456 DB_PATH =",
"new_title, 'text': note[\"text\"], 'timestamp': note[\"timestamp\"] } found = find_note(find_title) if not found: add_note(_note)",
"note with title that is already taken.\"\"\" def __init__(self, title): \"\"\"Show exception with",
"get(self): \"\"\"Handle get and return all notes matching search query.\"\"\" response = {",
"and create / update give note.\"\"\" note = json.loads(self.request.body.decode('utf-8')) title_update = note[\"title\"] if",
"'Accept, ' 'X-Requested-With, ' 'Content-Type, ' 'Access-Control-Request-Method, ' 'Access-Control-Request-Headers' ) class VersionRootHandler(CorsBaseHandler): \"\"\"Handle",
"(index, note) of note that has title or False if no such note.\"\"\"",
"= [] query_tokens = tokenize(query) for note in db['notes']: if match_token(note, query_tokens): notes.append(note)",
"\"--use-test-db\" in sys.argv routes = [ (r\"/version\", VersionRootHandler), (r\"/notes\", NotesRootHandler), (r\"/notes/titles\", NotesTitlesHandler), (r\"/note/(.*)\",",
"'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True }, 'notes': [ { 'title': 'some note",
"the client.\"\"\" CORS_ORIGIN = 'http://localhost:8080' CORS_METHODS = 'GET, PUT, DELETE' CORS_HEADERS = (",
"title_update _note = { 'title': new_title, 'text': note[\"text\"], 'timestamp': note[\"timestamp\"] } found =",
"given title from database.\"\"\" try: delete_note(title) except NoSuchNoteExists: self.clear() self.set_status(404) self.finish(\"Note '{}' does",
"== len(tokens) notes = [] query_tokens = tokenize(query) for note in db['notes']: if",
"note title', 'text': 'other note text' } ] } def tokenize(s): \"\"\"Split string",
"class NoteHandler(CorsBaseHandler): \"\"\"Handle /note/(.*) . /note/:title GET DELETE \"\"\" def get(self, title): \"\"\"Handle",
"matching search query.\"\"\" response = { 'notes': [] } if self.get_argument('q') == \"\":",
"from database.\"\"\" response = { 'notes': db['notes'] } self.write(response) def put(self, *args, **kwargs):",
"{ 'version': '0.0.1', 'api_version': '0.1', 'is_test_db': True } self.write(response) class NotesRootHandler(CorsBaseHandler): \"\"\"Handle /notes",
"False def signal_handler(signum, frame): \"\"\"Signal handler for closing tornado.\"\"\" global is_closing logging.info('exiting...') is_closing",
"TEST_DB_PATH = \"test/test_db.json\" db = { 'version': { 'version': '0.0.1', 'api_version': '0.1', 'is_test_db':",
"db['notes']: if match_token(note, query_tokens): notes.append(note) return notes class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler): \"\"\"Set up CORS",
"\"\"\"Handle get and return all notes matching search query.\"\"\" response = { 'notes':",
"if trying to delete a note that doesn't exist.\"\"\" def __init__(self, title): \"\"\"Show",
"\"\"\"Show exception with the note title.\"\"\" super(NoSuchNoteExists, self).__init__(title) def add_note(note): \"\"\"Add note to",
"title or False if no such note.\"\"\" for i, note in enumerate(db['notes']): if",
"db_path = TEST_DB_PATH logging.info(\"server path:\", os.path.abspath(__file__)) logging.info(\"server: db_path:\", db_path) with open(db_path) as f:",
"test to have end with expected state afterwards.\"\"\" read_db() def is_using_test_db(): \"\"\"Check if",
"= found[1] self.write(response) def delete(self, title): \"\"\"Handle delete and delete note with given"
] |
[
"\"<ins>text</ins>\") def test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\", cl='abclass', ident='123', style=\"font-size:0.9em;\", attrs={\"data-test\": 'abc'})) == \"<ins id=\\\"123\\\" class=\\\"abclass\\\"",
"def test_text(): assert(str(Inserted(\"text\")) == \"<ins>text</ins>\") def test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\", cl='abclass', ident='123', style=\"font-size:0.9em;\", attrs={\"data-test\": 'abc'}))",
"sofi.ui import Inserted def test_basic(): assert(str(Inserted()) == \"<ins></ins>\") def test_text(): assert(str(Inserted(\"text\")) == \"<ins>text</ins>\")",
"assert(str(Inserted(\"text\")) == \"<ins>text</ins>\") def test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\", cl='abclass', ident='123', style=\"font-size:0.9em;\", attrs={\"data-test\": 'abc'})) == \"<ins",
"== \"<ins></ins>\") def test_text(): assert(str(Inserted(\"text\")) == \"<ins>text</ins>\") def test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\", cl='abclass', ident='123', style=\"font-size:0.9em;\",",
"test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\", cl='abclass', ident='123', style=\"font-size:0.9em;\", attrs={\"data-test\": 'abc'})) == \"<ins id=\\\"123\\\" class=\\\"abclass\\\" style=\\\"font-size:0.9em;\\\" data-test=\\\"abc\\\">text</ins>\")",
"from sofi.ui import Inserted def test_basic(): assert(str(Inserted()) == \"<ins></ins>\") def test_text(): assert(str(Inserted(\"text\")) ==",
"def test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\", cl='abclass', ident='123', style=\"font-size:0.9em;\", attrs={\"data-test\": 'abc'})) == \"<ins id=\\\"123\\\" class=\\\"abclass\\\" style=\\\"font-size:0.9em;\\\"",
"== \"<ins>text</ins>\") def test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\", cl='abclass', ident='123', style=\"font-size:0.9em;\", attrs={\"data-test\": 'abc'})) == \"<ins id=\\\"123\\\"",
"Inserted def test_basic(): assert(str(Inserted()) == \"<ins></ins>\") def test_text(): assert(str(Inserted(\"text\")) == \"<ins>text</ins>\") def test_custom_class_ident_style_and_attrs():",
"import Inserted def test_basic(): assert(str(Inserted()) == \"<ins></ins>\") def test_text(): assert(str(Inserted(\"text\")) == \"<ins>text</ins>\") def",
"def test_basic(): assert(str(Inserted()) == \"<ins></ins>\") def test_text(): assert(str(Inserted(\"text\")) == \"<ins>text</ins>\") def test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\",",
"test_basic(): assert(str(Inserted()) == \"<ins></ins>\") def test_text(): assert(str(Inserted(\"text\")) == \"<ins>text</ins>\") def test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\", cl='abclass',",
"\"<ins></ins>\") def test_text(): assert(str(Inserted(\"text\")) == \"<ins>text</ins>\") def test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\", cl='abclass', ident='123', style=\"font-size:0.9em;\", attrs={\"data-test\":",
"test_text(): assert(str(Inserted(\"text\")) == \"<ins>text</ins>\") def test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\", cl='abclass', ident='123', style=\"font-size:0.9em;\", attrs={\"data-test\": 'abc'})) ==",
"assert(str(Inserted()) == \"<ins></ins>\") def test_text(): assert(str(Inserted(\"text\")) == \"<ins>text</ins>\") def test_custom_class_ident_style_and_attrs(): assert(str(Inserted(\"text\", cl='abclass', ident='123',"
] |
[
"cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg def createGrid(listOfBrodatzInts, outName, howManyPerRow): listOfRowOutputs = [] for i in",
"0 if(initm + mRange[1] > (height - 1)): diff = ((initm + mRange[1])",
"width, W): mRange = [] nRange = [] mRange.append(0) mRange.append(W-1) nRange.append(0) nRange.append(W-1) initm",
"= float(n) if float_n < 0: raise argparse.ArgumentTypeError(\"%s is negative \" % n)",
"would have made it a little more efficient, instead I just decided to",
"brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + howManyPerRow] for x in xrange(0,len(listOfRowOutputs), howManyPerRow)]",
"diff = ((initn + nRange[1]) - (width - 1)) nRange[1] -= diff windowHeight",
"in range(2, len(featureVectors[0])): for row in range(0, len(featureVectors)): featureVectors[row][col] -= means[col] copy =",
"# texturePairs = [[17,77],[3,68],[3,17],[55,68]] # count = 0 # for pair in texturePairs:",
"+ size, fill=0) im = Image.open(brodatz + \"D\" + str(circleInt) + \".png\") output",
"range(len(subOuts)): dest = brodatz + \"cropRow\" + str(i) + \".png\" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz",
"I pair all of the textures to every other texture. If I did",
"in the papers already. # # We can use any of the 112",
"brodatz + \"Nat5crop.png\", 0) size = (128, 128) mask = Image.new('L', size, color=255)",
"= [] mRange.append(0) mRange.append(W-1) nRange.append(0) nRange.append(W-1) initm = int(round(row - math.floor(W / 2)))",
"to read in some feature vectors instead of creating them. def readInFeatureVectorsFromFile(dir): list",
"else: pyplt.imsave(outdir, labels.reshape(img.shape)) # Call the k means algorithm for classification def clusterFeatureVectors(featureVectors,",
"0) # Destroy all sub crops (we can make this optional if we",
"width = img.shape for row in range(height): for col in range(width): featureVector =",
"def createTexturePair(pair, outName): pathsToTemp = [brodatz + \"D\" + str(pair[0]) + \".png\", brodatz",
"spatial weighting also takes place here. # The mean can be subtracted if",
"every other texture. If I did so, # I would have made it",
"384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + howManyPerRow] for x in",
"(initm < 0): mRange[1] += initm initm = 0 if (initn < 0):",
"function that checks boundaries when performing spatial convolution. def getRanges_for_window_with_adjust(row, col, height, width,",
"+ \"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1) deleteCroppedImages() #-------------------------------------------------------------------------- # Create test images #-------------------------------------------------------------------------- #",
"= [] height, width = img.shape for row in range(height): for col in",
"1) deleteCroppedImages() #-------------------------------------------------------------------------- # Create test images #-------------------------------------------------------------------------- # Note that I did",
"import numpy as np from PIL import Image, ImageOps, ImageDraw import os, glob",
"mean can be subtracted if specified by the implementation. def normalizeData(featureVectors, setMeanToZero, spatialWeight=1):",
"up old filter and feature images if the user chose to print them.",
"subOuts = [listOfRowOutputs[x:x + howManyPerRow] for x in xrange(0,len(listOfRowOutputs), howManyPerRow)] dests = []",
"images that were in the papers already. # # We can use any",
"def deleteCroppedImages(): for filename in glob.glob(brodatz + \"*crop*\"): os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType):",
"+ \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + \"Nat5crop.png\", 0) size = (128, 128) mask",
"instead I just decided to # use the images that were in the",
"the variance to be 1 (unit variance), # spatial weighting also takes place",
"texturePairs = [[17,77],[3,68],[3,17],[55,68]] # count = 0 # for pair in texturePairs: #",
"pathsToTemp = [brodatz + \"D\" + str(pair[0]) + \".png\", brodatz + \"D\" +",
"the image! # nat5 = [77,55,84,17] # circleInt = 24 # outName =",
"# Checks user input (i.e. cannot have a negative weighting value) def check_positive_float(n):",
"cropTexture(256, 256, 384, 384, pathsToTemp[0], brodatz + \"outcrop1.png\") cropTexture(256, 256, 384, 384, pathsToTemp[1],",
"If we want to read in some feature vectors instead of creating them.",
"filter and feature images if the user chose to print them. def deleteExistingSubResults(outputPath):",
"\".png\", imageToPrint) i+=1 # Print the final result, the user can also choose",
"texturePairs: # outName = brodatz + \"pair\" + str(count) + \".png\" # createTexturePair(pair,",
"went about # cropping the input images. I left them here, in the",
"0 if (initn < 0): nRange[1] += initn initn = 0 if(initm +",
"2] for x in xrange(0, len(listOfRowOutputs), 2)] dests = [] for i in",
"open(dir)] list = [i.split() for i in list] newList = [] for row",
"nRange[1] > (width-1)): diff = ((initn + nRange[1]) - (width - 1)) nRange[1]",
"the k means algorithm for classification def clusterFeatureVectors(featureVectors, k): kmeans = clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors)",
"sklearn.cluster as clstr import cv2 import numpy as np from PIL import Image,",
"later def printFeatureVectors(outDir, featureVectors): f = open(outDir, 'w') for vector in featureVectors: for",
"str(listOfBrodatzInts[i]) + \".png\" # 128x128 crops, in order to generate a 256x256 image",
"[] for item in row: floatitem = float(item) newRow.append(floatitem) newList.append(newRow) return newList #",
"the final result, the user can also choose to make the output grey",
"item in row: floatitem = float(item) newRow.append(floatitem) newList.append(newRow) return newList # Print the",
"x in xrange(0, len(listOfRowOutputs), 2)] dests = [] for i in range(len(subOuts)): dest",
"line in open(dir)] list = [i.split() for i in list] newList = []",
"for item in vector: f.write(str(item) + \" \") f.write(\"\\n\") f.close() # If we",
"if need be. brodatz = \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut = \"D:\\\\ImageProcessing\\\\project\\\\concat.png\" # This is the",
"the papers already. # # We can use any of the 112 images",
"if specified by the implementation. def normalizeData(featureVectors, setMeanToZero, spatialWeight=1): means = [] for",
"row and column data def constructFeatureVectors(featureImages, img): featureVectors = [] height, width =",
"img = Image.open(brodatz + 'circlecrop.png').convert(\"RGBA\") img_w, img_h = img.size background = Image.open(brodatz +",
"inDir, outDir): box = (x_offset, Y_offset, width, height) image = Image.open(inDir) crop =",
"axisType): images = [] for thisImage in pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images, axis=axisType))",
"outName, format=\"png\") deleteCroppedImages() def createTexturePair(pair, outName): pathsToTemp = [brodatz + \"D\" + str(pair[0])",
"the input images. I left them here, in the case that Brodatz #",
"is the function that checks boundaries when performing spatial convolution. def getRanges_for_window_with_adjust(row, col,",
"# nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow = 4 # outName = \"Nat16.png\" #",
"[3,68,17,77] # howManyPerRow = 2 # outName = \"grid4.png\" # createGrid(grid4, outName, howManyPerRow)",
"for filename in os.listdir(outputPath): if (filename.startswith(\"filter\") or filename.startswith(\"feature\")): os.remove(filename) # Checks user input",
"centering=(0.5, 0.5)) output.paste(0, mask=mask) output.save(brodatz + 'circlecrop.png', transparency=0) img = Image.open(brodatz + 'circlecrop.png').convert(\"RGBA\")",
"if we are looking to save our feature vectors for later def printFeatureVectors(outDir,",
"to save our feature vectors for later def printFeatureVectors(outDir, featureVectors): f = open(outDir,",
"were downloaded and cropped as new input images. #-------------------------------------------------------------------------- def cropTexture(x_offset, Y_offset, width,",
"cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg def createGrid(listOfBrodatzInts, outName, howManyPerRow): listOfRowOutputs = [] for i",
"place here. # The mean can be subtracted if specified by the implementation.",
"Y_offset, width, height, inDir, outDir): box = (x_offset, Y_offset, width, height) image =",
"pathsToTemp[0], brodatz + \"outcrop1.png\") cropTexture(256, 256, 384, 384, pathsToTemp[1], brodatz + \"outcrop2.png\") cropsToConcat",
"texture. If I did so, # I would have made it a little",
"image.crop(box) crop.save(outDir, \"PNG\") def deleteCroppedImages(): for filename in glob.glob(brodatz + \"*crop*\"): os.remove(filename) def",
"= [[17,77],[3,68],[3,17],[55,68]] # count = 0 # for pair in texturePairs: # outName",
"xrange(0, len(listOfRowOutputs), 2)] dests = [] for i in range(len(subOuts)): dest = brodatz",
"\".png\" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + \"cropRow\" + str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz",
"len(featureVectors)): colMean += featureVectors[row][col] colMean /= len(featureVectors) means.append(colMean) for col in range(2, len(featureVectors[0])):",
"+ \"D\" + str(pair[1]) + \".png\"] cropTexture(256, 256, 384, 384, pathsToTemp[0], brodatz +",
"deleteCroppedImages() #-------------------------------------------------------------------------- # Create test images #-------------------------------------------------------------------------- # Note that I did not",
"2, (bg_h - img_h) / 2) background.paste(output, offset, img) background.save(brodatz + outName, format=\"png\")",
"\"D\" + str(pair[1]) + \".png\"] cropTexture(256, 256, 384, 384, pathsToTemp[0], brodatz + \"outcrop1.png\")",
"return int_n # Checks user input (i.e. cannot have a negative weighting value)",
"= [] for thisImage in pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images, axis=axisType)) outimg =",
"\"outcrop1.png\", brodatz + \"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1) deleteCroppedImages() #-------------------------------------------------------------------------- # Create test images",
"/= len(featureVectors) means.append(colMean) for col in range(2, len(featureVectors[0])): for row in range(0, len(featureVectors)):",
"def normalizeData(featureVectors, setMeanToZero, spatialWeight=1): means = [] for col in range(0, len(featureVectors[0])): colMean",
"howManyPerRow] for x in xrange(0,len(listOfRowOutputs), howManyPerRow)] dests = [] for i in range(len(subOuts)):",
"transparency=0) img = Image.open(brodatz + 'circlecrop.png').convert(\"RGBA\") img_w, img_h = img.size background = Image.open(brodatz",
"pathsToTemp[1], brodatz + \"outcrop2.png\") cropsToConcat = [brodatz + \"outcrop1.png\", brodatz + \"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat,",
"means.append(colMean) for col in range(2, len(featureVectors[0])): for row in range(0, len(featureVectors)): featureVectors[row][col] -=",
"in range(0, len(featureVectors)): featureVectors[row][col] -= means[col] copy = vq.whiten(featureVectors) if (setMeanToZero): for row",
"+ \".png\", brodatz + \"D\" + str(pair[1]) + \".png\"] cropTexture(256, 256, 384, 384,",
"np.concatenate(images, axis=axisType)) outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg def createGrid(listOfBrodatzInts, outName, howManyPerRow): listOfRowOutputs",
"# count = 0 # for pair in texturePairs: # outName = brodatz",
"= [] for col in range(0, len(featureVectors[0])): colMean = 0 for row in",
"int_n < 0: raise argparse.ArgumentTypeError(\"%s is negative\" % n) return int_n # Checks",
"str(listOfBrodatzInts[i]) + \".png\" brodatzCropOutput = brodatz + \"cropD\" + str(listOfBrodatzInts[i]) + \".png\" #",
"Create the feature vectors and add in row and column data def constructFeatureVectors(featureImages,",
"labels.reshape(img.shape)) else: pyplt.imsave(outdir, labels.reshape(img.shape)) # Call the k means algorithm for classification def",
"user chose to print them. def deleteExistingSubResults(outputPath): for filename in os.listdir(outputPath): if (filename.startswith(\"filter\")",
"initn = int(round(col - math.floor(W / 2))) if (initm < 0): mRange[1] +=",
"downloaded and cropped as new input images. #-------------------------------------------------------------------------- def cropTexture(x_offset, Y_offset, width, height,",
"len(labels)): for col in range(0, len(labels[0])): outputIntensity = (255/k)*labels[row][col] labels[row][col] = outputIntensity cv2.imwrite(outdir,",
"+ \".png\" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + \"cropRow\" + str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests,",
"= img.shape for row in range(height): for col in range(width): featureVector = []",
"[29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow = 4 # outName = \"Nat16.png\" # createGrid(nat16, outName, howManyPerRow)",
"\"D\" + str(circleInt) + \".png\") output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5)) output.paste(0, mask=mask)",
"def printFeatureImages(featureImages, naming, printlocation): i =0 for image in featureImages: # Normalize to",
"in range(0, len(labels)): for col in range(0, len(labels[0])): outputIntensity = (255/k)*labels[row][col] labels[row][col] =",
"normalize data before clustering occurs. # Whiten sets the variance to be 1",
"make the output grey def printClassifiedImage(labels, k, img, outdir, greyOutput): if(greyOutput): labels =",
"- math.floor(W / 2))) if (initm < 0): mRange[1] += initm initm =",
"# #the last int is the circle in the middle of the image!",
"\"Nat5crop.png\", 0) size = (128, 128) mask = Image.new('L', size, color=255) draw =",
"made it a little more efficient, instead I just decided to # use",
"len(featureVectors)): featureVectors[row][col] -= means[col] copy = vq.whiten(featureVectors) if (setMeanToZero): for row in range(0,",
"\"\\\\\" + naming + str(i) + \".png\", imageToPrint) i+=1 # Print the final",
"# grid4 = [3,68,17,77] # howManyPerRow = 2 # outName = \"grid4.png\" #",
"float_n = float(n) if float_n < 0: raise argparse.ArgumentTypeError(\"%s is negative \" %",
"howManyPerRow) # # grid4 = [3,68,17,77] # howManyPerRow = 2 # outName =",
"mask size value) def check_positive_int(n): int_n = int(n) if int_n < 0: raise",
"raise argparse.ArgumentTypeError(\"%s is negative\" % n) return int_n # Checks user input (i.e.",
"str(i) + \".png\" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + \"cropRow\" + str(i) + \".png\", 1)",
"kmeans.labels_ return labels # To clean up old filter and feature images if",
"offset, img) background.save(brodatz + outName, format=\"png\") deleteCroppedImages() def createTexturePair(pair, outName): pathsToTemp = [brodatz",
"< 0: raise argparse.ArgumentTypeError(\"%s is negative \" % n) return float_n #-------------------------------------------------------------------------- #",
"we want to read in some feature vectors instead of creating them. def",
"+ \".png\" # 128x128 crops, in order to generate a 512x512 image cropTexture(256,",
"algorithm for classification def clusterFeatureVectors(featureVectors, k): kmeans = clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels = kmeans.labels_",
"os, glob import matplotlib.pyplot as pyplt import scipy.cluster.vq as vq import argparse import",
"grid4 = [3,68,17,77] # howManyPerRow = 2 # outName = \"grid4.png\" # createGrid(grid4,",
"brodatz + \"cropRow\" + str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0)",
"return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn)) # Used to normalize data before clustering occurs.",
"vectors instead of creating them. def readInFeatureVectorsFromFile(dir): list = [line.rstrip('\\n') for line in",
"nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow = 4 # outName = \"Nat16.png\" # createGrid(nat16,",
"= Image.open(brodatz + \"Nat5crop.png\") bg_w, bg_h = background.size offset = ((bg_w - img_w)",
"(filename.startswith(\"filter\") or filename.startswith(\"feature\")): os.remove(filename) # Checks user input (i.e. cannot have a negative",
"+ howManyPerRow] for x in xrange(0,len(listOfRowOutputs), howManyPerRow)] dests = [] for i in",
"outName): pathsToTemp = [brodatz + \"D\" + str(pair[0]) + \".png\", brodatz + \"D\"",
"images #-------------------------------------------------------------------------- # Note that I did not write this to have an",
"= 'Nat5.png' # createGridWithCircle(nat5, circleInt, outName) # # texturePairs = [[17,77],[3,68],[3,17],[55,68]] # count",
"background.size offset = ((bg_w - img_w) / 2, (bg_h - img_h) / 2)",
"in range(0, len(featureVectors)): for col in range(0, len(featureVectors[0])): copy[row][col] -= means[col] for row",
"kmeans.fit(featureVectors) labels = kmeans.labels_ return labels # To clean up old filter and",
"# for pair in texturePairs: # outName = brodatz + \"pair\" + str(count)",
"for x in xrange(0, len(listOfRowOutputs), 2)] dests = [] for i in range(len(subOuts)):",
"this to have an exhaustive approach in mind, # where I pair all",
"0: raise argparse.ArgumentTypeError(\"%s is negative\" % n) return int_n # Checks user input",
"imageToPrint = cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation + \"\\\\\" + naming +",
"= (x_offset, Y_offset, width, height) image = Image.open(inDir) crop = image.crop(box) crop.save(outDir, \"PNG\")",
"Brodatz album here # nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow = 4 # outName",
"the function that checks boundaries when performing spatial convolution. def getRanges_for_window_with_adjust(row, col, height,",
"windowHeight = mRange[1] - mRange[0] windowWidth = nRange[1] - nRange[0] return int(round(windowHeight)), int(round(windowWidth)),",
"\"Nat16.png\" # createGrid(nat16, outName, howManyPerRow) # # grid4 = [3,68,17,77] # howManyPerRow =",
"# spatial weighting also takes place here. # The mean can be subtracted",
"= image.crop(box) crop.save(outDir, \"PNG\") def deleteCroppedImages(): for filename in glob.glob(brodatz + \"*crop*\"): os.remove(filename)",
"size, fill=0) im = Image.open(brodatz + \"D\" + str(circleInt) + \".png\") output =",
"image in featureImages: # Normalize to intensity values imageToPrint = cv2.normalize(image, alpha=0, beta=255,",
"images if the user chose to print them. def deleteExistingSubResults(outputPath): for filename in",
"concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + \"cropRow\" + str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + \"Nat5crop.png\",",
"# where I pair all of the textures to every other texture. If",
"2))) if (initm < 0): mRange[1] += initm initm = 0 if (initn",
"to normalize data before clustering occurs. # Whiten sets the variance to be",
"and cropped as new input images. #-------------------------------------------------------------------------- def cropTexture(x_offset, Y_offset, width, height, inDir,",
"Destroy all sub crops (we can make this optional if we want!) deleteCroppedImages()",
"any of the 112 images from the Brodatz album here # nat16 =",
"def getRanges_for_window_with_adjust(row, col, height, width, W): mRange = [] nRange = [] mRange.append(0)",
"W): mRange = [] nRange = [] mRange.append(0) mRange.append(W-1) nRange.append(0) nRange.append(W-1) initm =",
"in range(width): featureVector = [] featureVector.append(row) featureVector.append(col) for featureImage in featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector)",
"diff = ((initm + mRange[1]) - (height - 1)) mRange[1] -= diff if(initn",
"def check_positive_float(n): float_n = float(n) if float_n < 0: raise argparse.ArgumentTypeError(\"%s is negative",
"background.save(brodatz + outName, format=\"png\") deleteCroppedImages() def createTexturePair(pair, outName): pathsToTemp = [brodatz + \"D\"",
"to print them. def deleteExistingSubResults(outputPath): for filename in os.listdir(outputPath): if (filename.startswith(\"filter\") or filename.startswith(\"feature\")):",
"beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation + \"\\\\\" + naming + str(i) + \".png\", imageToPrint)",
"[77,55,84,17] # circleInt = 24 # outName = 'Nat5.png' # createGridWithCircle(nat5, circleInt, outName)",
"check_positive_float(n): float_n = float(n) if float_n < 0: raise argparse.ArgumentTypeError(\"%s is negative \"",
"= ImageDraw.Draw(mask) draw.ellipse((0, 0) + size, fill=0) im = Image.open(brodatz + \"D\" +",
"def printFeatureVectors(outDir, featureVectors): f = open(outDir, 'w') for vector in featureVectors: for item",
"images from the Brodatz album here # nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow =",
"the middle of the image! # nat5 = [77,55,84,17] # circleInt = 24",
"= img.size background = Image.open(brodatz + \"Nat5crop.png\") bg_w, bg_h = background.size offset =",
"have a negative weighting value) def check_positive_float(n): float_n = float(n) if float_n <",
"classification def clusterFeatureVectors(featureVectors, k): kmeans = clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels = kmeans.labels_ return labels",
"2) background.paste(output, offset, img) background.save(brodatz + outName, format=\"png\") deleteCroppedImages() def createTexturePair(pair, outName): pathsToTemp",
"1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + \"Nat5crop.png\", 0) size = (128, 128) mask = Image.new('L',",
"input images. I left them here, in the case that Brodatz # textures",
"createGrid(grid4, outName, howManyPerRow) # #the last int is the circle in the middle",
"= [] featureVector.append(row) featureVector.append(col) for featureImage in featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return featureVectors #",
"int_n = int(n) if int_n < 0: raise argparse.ArgumentTypeError(\"%s is negative\" % n)",
"\"cropRow\" + str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0) # Destroy",
"= cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation + \"\\\\\" + naming + str(i)",
"brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + 2] for x in xrange(0, len(listOfRowOutputs),",
"range(0, len(featureVectors)): colMean += featureVectors[row][col] colMean /= len(featureVectors) means.append(colMean) for col in range(2,",
"=0 for image in featureImages: # Normalize to intensity values imageToPrint = cv2.normalize(image,",
"exhaustive approach in mind, # where I pair all of the textures to",
"sets the variance to be 1 (unit variance), # spatial weighting also takes",
"n) return float_n #-------------------------------------------------------------------------- # All of the functions below were left here",
"= \"Nat16.png\" # createGrid(nat16, outName, howManyPerRow) # # grid4 = [3,68,17,77] # howManyPerRow",
"printFeatureVectors(outDir, featureVectors): f = open(outDir, 'w') for vector in featureVectors: for item in",
"I just decided to # use the images that were in the papers",
"0): mRange[1] += initm initm = 0 if (initn < 0): nRange[1] +=",
"list = [i.split() for i in list] newList = [] for row in",
"background = Image.open(brodatz + \"Nat5crop.png\") bg_w, bg_h = background.size offset = ((bg_w -",
"can use any of the 112 images from the Brodatz album here #",
"colMean = 0 for row in range(0, len(featureVectors)): colMean += featureVectors[row][col] colMean /=",
"images. I left them here, in the case that Brodatz # textures were",
"dests = [] for i in range(len(subOuts)): dest = brodatz + \"cropRow\" +",
"# 128x128 crops, in order to generate a 256x256 image cropTexture(256, 256, 384,",
"howManyPerRow = 2 # outName = \"grid4.png\" # createGrid(grid4, outName, howManyPerRow) # #the",
"for col in range(2, len(featureVectors[0])): for row in range(0, len(featureVectors)): featureVectors[row][col] -= means[col]",
"for row in range(height): for col in range(width): featureVector = [] featureVector.append(row) featureVector.append(col)",
"outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg def createGrid(listOfBrodatzInts, outName, howManyPerRow): listOfRowOutputs = []",
"= 0 # for pair in texturePairs: # outName = brodatz + \"pair\"",
"len(featureVectors)): copy[row][0] *= spatialWeight copy[row][1] *= spatialWeight return copy # Create the feature",
"[brodatz + \"outcrop1.png\", brodatz + \"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1) deleteCroppedImages() #-------------------------------------------------------------------------- # Create",
"import sklearn.cluster as clstr import cv2 import numpy as np from PIL import",
"list = [line.rstrip('\\n') for line in open(dir)] list = [i.split() for i in",
"in xrange(0,len(listOfRowOutputs), howManyPerRow)] dests = [] for i in range(len(subOuts)): dest = brodatz",
"means algorithm for classification def clusterFeatureVectors(featureVectors, k): kmeans = clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels =",
"\"D:\\\\ImageProcessing\\\\project\\\\concat.png\" # This is the function that checks boundaries when performing spatial convolution.",
"be. brodatz = \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut = \"D:\\\\ImageProcessing\\\\project\\\\concat.png\" # This is the function that",
"user input (i.e. cannot have a negative weighting value) def check_positive_float(n): float_n =",
"in open(dir)] list = [i.split() for i in list] newList = [] for",
"# createGrid(nat16, outName, howManyPerRow) # # grid4 = [3,68,17,77] # howManyPerRow = 2",
"to generate a 512x512 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts",
"in mind, # where I pair all of the textures to every other",
"for later def printFeatureVectors(outDir, featureVectors): f = open(outDir, 'w') for vector in featureVectors:",
"performing spatial convolution. def getRanges_for_window_with_adjust(row, col, height, width, W): mRange = [] nRange",
"= 2 # outName = \"grid4.png\" # createGrid(grid4, outName, howManyPerRow) # #the last",
"size = (128, 128) mask = Image.new('L', size, color=255) draw = ImageDraw.Draw(mask) draw.ellipse((0,",
"0 # for pair in texturePairs: # outName = brodatz + \"pair\" +",
"generate a 512x512 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts =",
"image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + 2]",
"(height - 1)) mRange[1] -= diff if(initn + nRange[1] > (width-1)): diff =",
"f.write(\"\\n\") f.close() # If we want to read in some feature vectors instead",
"clustering occurs. # Whiten sets the variance to be 1 (unit variance), #",
"len(featureVectors)): for col in range(0, len(featureVectors[0])): copy[row][col] -= means[col] for row in range(0,",
"- nRange[0] return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn)) # Used to normalize data before",
"[i.split() for i in list] newList = [] for row in list: newRow",
"height, inDir, outDir): box = (x_offset, Y_offset, width, height) image = Image.open(inDir) crop",
"alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation + \"\\\\\" + naming + str(i) + \".png\",",
"these if need be. brodatz = \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut = \"D:\\\\ImageProcessing\\\\project\\\\concat.png\" # This is",
"fill=0) im = Image.open(brodatz + \"D\" + str(circleInt) + \".png\") output = ImageOps.fit(im,",
"If I did so, # I would have made it a little more",
"(width-1)): diff = ((initn + nRange[1]) - (width - 1)) nRange[1] -= diff",
"featureImage in featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return featureVectors # An extra function if we",
"to demonstrate how I went about # cropping the input images. I left",
"def deleteExistingSubResults(outputPath): for filename in os.listdir(outputPath): if (filename.startswith(\"filter\") or filename.startswith(\"feature\")): os.remove(filename) # Checks",
"for i in range(len(subOuts)): dest = brodatz + \"cropRow\" + str(i) + \".png\"",
"Brodatz # textures were downloaded and cropped as new input images. #-------------------------------------------------------------------------- def",
"glob # We can specify these if need be. brodatz = \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut",
"the user chose to print them. def deleteExistingSubResults(outputPath): for filename in os.listdir(outputPath): if",
"# # We can use any of the 112 images from the Brodatz",
"checks boundaries when performing spatial convolution. def getRanges_for_window_with_adjust(row, col, height, width, W): mRange",
"= [i.split() for i in list] newList = [] for row in list:",
"outName = 'Nat5.png' # createGridWithCircle(nat5, circleInt, outName) # # texturePairs = [[17,77],[3,68],[3,17],[55,68]] #",
"concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1) deleteCroppedImages() #-------------------------------------------------------------------------- # Create test images #-------------------------------------------------------------------------- # Note that",
"to make the output grey def printClassifiedImage(labels, k, img, outdir, greyOutput): if(greyOutput): labels",
"use any of the 112 images from the Brodatz album here # nat16",
"have a negative mask size value) def check_positive_int(n): int_n = int(n) if int_n",
"that Brodatz # textures were downloaded and cropped as new input images. #--------------------------------------------------------------------------",
"\"cropRow\" + str(i) + \".png\" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + \"cropRow\" + str(i) +",
"str(circleInt) + \".png\") output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5)) output.paste(0, mask=mask) output.save(brodatz +",
"in range(0, len(labels[0])): outputIntensity = (255/k)*labels[row][col] labels[row][col] = outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape)) else: pyplt.imsave(outdir,",
"labels = labels.reshape(img.shape) for row in range(0, len(labels)): for col in range(0, len(labels[0])):",
"mRange[1] += initm initm = 0 if (initn < 0): nRange[1] += initn",
"= [] for row in list: newRow = [] for item in row:",
"import Image, ImageOps, ImageDraw import os, glob import matplotlib.pyplot as pyplt import scipy.cluster.vq",
"range(0, len(labels)): for col in range(0, len(labels[0])): outputIntensity = (255/k)*labels[row][col] labels[row][col] = outputIntensity",
"mask.size, centering=(0.5, 0.5)) output.paste(0, mask=mask) output.save(brodatz + 'circlecrop.png', transparency=0) img = Image.open(brodatz +",
"createTexturePair(pair, outName): pathsToTemp = [brodatz + \"D\" + str(pair[0]) + \".png\", brodatz +",
"= [listOfRowOutputs[x:x + 2] for x in xrange(0, len(listOfRowOutputs), 2)] dests = []",
"len(featureVectors[0])): colMean = 0 for row in range(0, len(featureVectors)): colMean += featureVectors[row][col] colMean",
"\"grid4.png\" # createGrid(grid4, outName, howManyPerRow) # #the last int is the circle in",
"concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0) # Destroy all sub crops (we can make",
"numpy as np from PIL import Image, ImageOps, ImageDraw import os, glob import",
"Print the intermediate results before clustering occurs def printFeatureImages(featureImages, naming, printlocation): i =0",
"means = [] for col in range(0, len(featureVectors[0])): colMean = 0 for row",
"user input (i.e. cannot have a negative mask size value) def check_positive_int(n): int_n",
"ImageOps.fit(im, mask.size, centering=(0.5, 0.5)) output.paste(0, mask=mask) output.save(brodatz + 'circlecrop.png', transparency=0) img = Image.open(brodatz",
"deleteCroppedImages(): for filename in glob.glob(brodatz + \"*crop*\"): os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType): images",
"chose to print them. def deleteExistingSubResults(outputPath): for filename in os.listdir(outputPath): if (filename.startswith(\"filter\") or",
"spatial convolution. def getRanges_for_window_with_adjust(row, col, height, width, W): mRange = [] nRange =",
"before clustering occurs. # Whiten sets the variance to be 1 (unit variance),",
"cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + howManyPerRow] for",
"the images that were in the papers already. # # We can use",
"os.remove(filename) # Checks user input (i.e. cannot have a negative mask size value)",
"image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + howManyPerRow]",
"= [brodatz + \"outcrop1.png\", brodatz + \"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1) deleteCroppedImages() #-------------------------------------------------------------------------- #",
"and add in row and column data def constructFeatureVectors(featureImages, img): featureVectors = []",
"for vector in featureVectors: for item in vector: f.write(str(item) + \" \") f.write(\"\\n\")",
"img_w) / 2, (bg_h - img_h) / 2) background.paste(output, offset, img) background.save(brodatz +",
"circle in the middle of the image! # nat5 = [77,55,84,17] # circleInt",
"deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts, circleInt, outName): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput",
"I went about # cropping the input images. I left them here, in",
"featureVectors[row][col] -= means[col] copy = vq.whiten(featureVectors) if (setMeanToZero): for row in range(0, len(featureVectors)):",
"crop.save(outDir, \"PNG\") def deleteCroppedImages(): for filename in glob.glob(brodatz + \"*crop*\"): os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages,",
"= 0 for row in range(0, len(featureVectors)): colMean += featureVectors[row][col] colMean /= len(featureVectors)",
"PIL import Image, ImageOps, ImageDraw import os, glob import matplotlib.pyplot as pyplt import",
"in featureImages: # Normalize to intensity values imageToPrint = cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,",
"< 0): nRange[1] += initn initn = 0 if(initm + mRange[1] > (height",
"nRange[1] - nRange[0] return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn)) # Used to normalize data",
"in the case that Brodatz # textures were downloaded and cropped as new",
"0) + size, fill=0) im = Image.open(brodatz + \"D\" + str(circleInt) + \".png\")",
"= clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels = kmeans.labels_ return labels # To clean up old",
"Checks user input (i.e. cannot have a negative weighting value) def check_positive_float(n): float_n",
"argparse import glob # We can specify these if need be. brodatz =",
"listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz + \"D\" +",
"column data def constructFeatureVectors(featureImages, img): featureVectors = [] height, width = img.shape for",
"- mRange[0] windowWidth = nRange[1] - nRange[0] return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn)) #",
"(height - 1)): diff = ((initm + mRange[1]) - (height - 1)) mRange[1]",
"open(outDir, 'w') for vector in featureVectors: for item in vector: f.write(str(item) + \"",
"range(width): featureVector = [] featureVector.append(row) featureVector.append(col) for featureImage in featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return",
"bg_w, bg_h = background.size offset = ((bg_w - img_w) / 2, (bg_h -",
"= brodatz + \"pair\" + str(count) + \".png\" # createTexturePair(pair, outName) # count",
"+ \"pair\" + str(count) + \".png\" # createTexturePair(pair, outName) # count += 1",
"colMean += featureVectors[row][col] colMean /= len(featureVectors) means.append(colMean) for col in range(2, len(featureVectors[0])): for",
"# Create the feature vectors and add in row and column data def",
"readInFeatureVectorsFromFile(dir): list = [line.rstrip('\\n') for line in open(dir)] list = [i.split() for i",
"= brodatz + \"cropD\" + str(listOfBrodatzInts[i]) + \".png\" # 128x128 crops, in order",
"\".png\" # 128x128 crops, in order to generate a 512x512 image cropTexture(256, 256,",
"cropping the input images. I left them here, in the case that Brodatz",
"(i.e. cannot have a negative weighting value) def check_positive_float(n): float_n = float(n) if",
"some feature vectors instead of creating them. def readInFeatureVectorsFromFile(dir): list = [line.rstrip('\\n') for",
"listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + howManyPerRow] for x in xrange(0,len(listOfRowOutputs), howManyPerRow)] dests =",
"An extra function if we are looking to save our feature vectors for",
"- (height - 1)) mRange[1] -= diff if(initn + nRange[1] > (width-1)): diff",
"512x512 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x +",
"cv2.imwrite(outdir, np.concatenate(images, axis=axisType)) outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg def createGrid(listOfBrodatzInts, outName, howManyPerRow):",
"outputIntensity = (255/k)*labels[row][col] labels[row][col] = outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape)) else: pyplt.imsave(outdir, labels.reshape(img.shape)) # Call",
"import argparse import glob # We can specify these if need be. brodatz",
"image = Image.open(inDir) crop = image.crop(box) crop.save(outDir, \"PNG\") def deleteCroppedImages(): for filename in",
"f.write(str(item) + \" \") f.write(\"\\n\") f.close() # If we want to read in",
"newList # Print the intermediate results before clustering occurs def printFeatureImages(featureImages, naming, printlocation):",
"+ \".png\", imageToPrint) i+=1 # Print the final result, the user can also",
"= 24 # outName = 'Nat5.png' # createGridWithCircle(nat5, circleInt, outName) # # texturePairs",
"values imageToPrint = cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation + \"\\\\\" + naming",
"\") f.write(\"\\n\") f.close() # If we want to read in some feature vectors",
"getRanges_for_window_with_adjust(row, col, height, width, W): mRange = [] nRange = [] mRange.append(0) mRange.append(W-1)",
"range(height): for col in range(width): featureVector = [] featureVector.append(row) featureVector.append(col) for featureImage in",
"< 0): mRange[1] += initm initm = 0 if (initn < 0): nRange[1]",
"col in range(0, len(labels[0])): outputIntensity = (255/k)*labels[row][col] labels[row][col] = outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape)) else:",
"negative mask size value) def check_positive_int(n): int_n = int(n) if int_n < 0:",
"# createGridWithCircle(nat5, circleInt, outName) # # texturePairs = [[17,77],[3,68],[3,17],[55,68]] # count = 0",
"ImageOps, ImageDraw import os, glob import matplotlib.pyplot as pyplt import scipy.cluster.vq as vq",
"occurs. # Whiten sets the variance to be 1 (unit variance), # spatial",
"were left here to demonstrate how I went about # cropping the input",
"the Brodatz album here # nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow = 4 #",
"256x256 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x +",
"import cv2 import numpy as np from PIL import Image, ImageOps, ImageDraw import",
"greyOutput): if(greyOutput): labels = labels.reshape(img.shape) for row in range(0, len(labels)): for col in",
"# Print the final result, the user can also choose to make the",
"here to demonstrate how I went about # cropping the input images. I",
"# use the images that were in the papers already. # # We",
"little more efficient, instead I just decided to # use the images that",
"the textures to every other texture. If I did so, # I would",
"dtype=cv2.CV_32F) cv2.imwrite(printlocation + \"\\\\\" + naming + str(i) + \".png\", imageToPrint) i+=1 #",
"want to read in some feature vectors instead of creating them. def readInFeatureVectorsFromFile(dir):",
"((initn + nRange[1]) - (width - 1)) nRange[1] -= diff windowHeight = mRange[1]",
"if(initm + mRange[1] > (height - 1)): diff = ((initm + mRange[1]) -",
"outimg def createGrid(listOfBrodatzInts, outName, howManyPerRow): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput",
"value) def check_positive_float(n): float_n = float(n) if float_n < 0: raise argparse.ArgumentTypeError(\"%s is",
"nRange[1] += initn initn = 0 if(initm + mRange[1] > (height - 1)):",
"just decided to # use the images that were in the papers already.",
"sub crops (we can make this optional if we want!) deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts,",
"mRange[1]) - (height - 1)) mRange[1] -= diff if(initn + nRange[1] > (width-1)):",
"if int_n < 0: raise argparse.ArgumentTypeError(\"%s is negative\" % n) return int_n #",
"if(initn + nRange[1] > (width-1)): diff = ((initn + nRange[1]) - (width -",
"- 1)) nRange[1] -= diff windowHeight = mRange[1] - mRange[0] windowWidth = nRange[1]",
"mRange[1] > (height - 1)): diff = ((initm + mRange[1]) - (height -",
"256, 384, 384, pathsToTemp[1], brodatz + \"outcrop2.png\") cropsToConcat = [brodatz + \"outcrop1.png\", brodatz",
"outName, 1) deleteCroppedImages() #-------------------------------------------------------------------------- # Create test images #-------------------------------------------------------------------------- # Note that I",
"To clean up old filter and feature images if the user chose to",
"k means algorithm for classification def clusterFeatureVectors(featureVectors, k): kmeans = clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels",
"img, outdir, greyOutput): if(greyOutput): labels = labels.reshape(img.shape) for row in range(0, len(labels)): for",
"\"Nat5crop.png\") bg_w, bg_h = background.size offset = ((bg_w - img_w) / 2, (bg_h",
"initn = 0 if(initm + mRange[1] > (height - 1)): diff = ((initm",
"of the textures to every other texture. If I did so, # I",
"# outName = brodatz + \"pair\" + str(count) + \".png\" # createTexturePair(pair, outName)",
"can specify these if need be. brodatz = \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut = \"D:\\\\ImageProcessing\\\\project\\\\concat.png\" #",
"+ \"\\\\\" + naming + str(i) + \".png\", imageToPrint) i+=1 # Print the",
"Create test images #-------------------------------------------------------------------------- # Note that I did not write this to",
"instead of creating them. def readInFeatureVectorsFromFile(dir): list = [line.rstrip('\\n') for line in open(dir)]",
"windowWidth = nRange[1] - nRange[0] return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn)) # Used to",
"n) return int_n # Checks user input (i.e. cannot have a negative weighting",
"cropped as new input images. #-------------------------------------------------------------------------- def cropTexture(x_offset, Y_offset, width, height, inDir, outDir):",
"(x_offset, Y_offset, width, height) image = Image.open(inDir) crop = image.crop(box) crop.save(outDir, \"PNG\") def",
"for image in featureImages: # Normalize to intensity values imageToPrint = cv2.normalize(image, alpha=0,",
"mRange[1] -= diff if(initn + nRange[1] > (width-1)): diff = ((initn + nRange[1])",
"math.floor(W / 2))) if (initm < 0): mRange[1] += initm initm = 0",
"[[17,77],[3,68],[3,17],[55,68]] # count = 0 # for pair in texturePairs: # outName =",
"Call the k means algorithm for classification def clusterFeatureVectors(featureVectors, k): kmeans = clstr.KMeans(n_clusters=k)",
"+ str(i) + \".png\" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + \"cropRow\" + str(i) + \".png\",",
"0): nRange[1] += initn initn = 0 if(initm + mRange[1] > (height -",
"row in range(0, len(featureVectors)): for col in range(0, len(featureVectors[0])): copy[row][col] -= means[col] for",
"them. def readInFeatureVectorsFromFile(dir): list = [line.rstrip('\\n') for line in open(dir)] list = [i.split()",
"vectors for later def printFeatureVectors(outDir, featureVectors): f = open(outDir, 'w') for vector in",
"for row in list: newRow = [] for item in row: floatitem =",
"pyplt import scipy.cluster.vq as vq import argparse import glob # We can specify",
"outDir): box = (x_offset, Y_offset, width, height) image = Image.open(inDir) crop = image.crop(box)",
"I did so, # I would have made it a little more efficient,",
"left here to demonstrate how I went about # cropping the input images.",
"an exhaustive approach in mind, # where I pair all of the textures",
"= brodatz + \"cropRow\" + str(i) + \".png\" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + \"cropRow\"",
"row in range(0, len(featureVectors)): featureVectors[row][col] -= means[col] copy = vq.whiten(featureVectors) if (setMeanToZero): for",
"int(round(initm)), int(round(initn)) # Used to normalize data before clustering occurs. # Whiten sets",
"# textures were downloaded and cropped as new input images. #-------------------------------------------------------------------------- def cropTexture(x_offset,",
"= labels.reshape(img.shape) for row in range(0, len(labels)): for col in range(0, len(labels[0])): outputIntensity",
"old filter and feature images if the user chose to print them. def",
"outName, howManyPerRow) # # grid4 = [3,68,17,77] # howManyPerRow = 2 # outName",
"in range(0, len(featureVectors)): colMean += featureVectors[row][col] colMean /= len(featureVectors) means.append(colMean) for col in",
"row in list: newRow = [] for item in row: floatitem = float(item)",
"img.shape for row in range(height): for col in range(width): featureVector = [] featureVector.append(row)",
"[] mRange.append(0) mRange.append(W-1) nRange.append(0) nRange.append(W-1) initm = int(round(row - math.floor(W / 2))) initn",
"concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + \"cropRow\" + str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName,",
"# This is the function that checks boundaries when performing spatial convolution. def",
"((initm + mRange[1]) - (height - 1)) mRange[1] -= diff if(initn + nRange[1]",
"i in list] newList = [] for row in list: newRow = []",
"The mean can be subtracted if specified by the implementation. def normalizeData(featureVectors, setMeanToZero,",
"= vq.whiten(featureVectors) if (setMeanToZero): for row in range(0, len(featureVectors)): for col in range(0,",
"[line.rstrip('\\n') for line in open(dir)] list = [i.split() for i in list] newList",
"+ \"cropD\" + str(listOfBrodatzInts[i]) + \".png\" # 128x128 crops, in order to generate",
"xrange(0,len(listOfRowOutputs), howManyPerRow)] dests = [] for i in range(len(subOuts)): dest = brodatz +",
"for i in list] newList = [] for row in list: newRow =",
"'w') for vector in featureVectors: for item in vector: f.write(str(item) + \" \")",
"to have an exhaustive approach in mind, # where I pair all of",
"len(featureVectors[0])): for row in range(0, len(featureVectors)): featureVectors[row][col] -= means[col] copy = vq.whiten(featureVectors) if",
"outName = \"Nat16.png\" # createGrid(nat16, outName, howManyPerRow) # # grid4 = [3,68,17,77] #",
"is the circle in the middle of the image! # nat5 = [77,55,84,17]",
"if (initn < 0): nRange[1] += initn initn = 0 if(initm + mRange[1]",
"+ str(listOfBrodatzInts[i]) + \".png\" brodatzCropOutput = brodatz + \"cropD\" + str(listOfBrodatzInts[i]) + \".png\"",
"col, height, width, W): mRange = [] nRange = [] mRange.append(0) mRange.append(W-1) nRange.append(0)",
"for classification def clusterFeatureVectors(featureVectors, k): kmeans = clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels = kmeans.labels_ return",
"# We can use any of the 112 images from the Brodatz album",
"[] for thisImage in pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images, axis=axisType)) outimg = cv2.imread(outdir,",
"384, 384, pathsToTemp[0], brodatz + \"outcrop1.png\") cropTexture(256, 256, 384, 384, pathsToTemp[1], brodatz +",
"from the Brodatz album here # nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow = 4",
"labels.reshape(img.shape)) # Call the k means algorithm for classification def clusterFeatureVectors(featureVectors, k): kmeans",
"I would have made it a little more efficient, instead I just decided",
"- img_h) / 2) background.paste(output, offset, img) background.save(brodatz + outName, format=\"png\") deleteCroppedImages() def",
"for row in range(0, len(featureVectors)): copy[row][0] *= spatialWeight copy[row][1] *= spatialWeight return copy",
"have an exhaustive approach in mind, # where I pair all of the",
"\".png\") output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5)) output.paste(0, mask=mask) output.save(brodatz + 'circlecrop.png', transparency=0)",
"\".png\", brodatz + \"D\" + str(pair[1]) + \".png\"] cropTexture(256, 256, 384, 384, pathsToTemp[0],",
"album here # nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow = 4 # outName =",
"approach in mind, # where I pair all of the textures to every",
"str(i) + \".png\", imageToPrint) i+=1 # Print the final result, the user can",
"brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + howManyPerRow] for x in xrange(0,len(listOfRowOutputs), howManyPerRow)] dests",
"def constructFeatureVectors(featureImages, img): featureVectors = [] height, width = img.shape for row in",
"range(0, len(featureVectors)): copy[row][0] *= spatialWeight copy[row][1] *= spatialWeight return copy # Create the",
"as np from PIL import Image, ImageOps, ImageDraw import os, glob import matplotlib.pyplot",
"use the images that were in the papers already. # # We can",
"\"PNG\") def deleteCroppedImages(): for filename in glob.glob(brodatz + \"*crop*\"): os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir,",
"outName): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz + \"D\"",
"[] height, width = img.shape for row in range(height): for col in range(width):",
"-= diff if(initn + nRange[1] > (width-1)): diff = ((initn + nRange[1]) -",
"+ \".png\" # 128x128 crops, in order to generate a 256x256 image cropTexture(256,",
"cropTexture(x_offset, Y_offset, width, height, inDir, outDir): box = (x_offset, Y_offset, width, height) image",
"if (setMeanToZero): for row in range(0, len(featureVectors)): for col in range(0, len(featureVectors[0])): copy[row][col]",
"featureImages: # Normalize to intensity values imageToPrint = cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)",
"-= diff windowHeight = mRange[1] - mRange[0] windowWidth = nRange[1] - nRange[0] return",
"item in vector: f.write(str(item) + \" \") f.write(\"\\n\") f.close() # If we want",
"them. def deleteExistingSubResults(outputPath): for filename in os.listdir(outputPath): if (filename.startswith(\"filter\") or filename.startswith(\"feature\")): os.remove(filename) #",
"+ outName, 0) # Destroy all sub crops (we can make this optional",
"if we want!) deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts, circleInt, outName): listOfRowOutputs = [] for i",
"'Nat5.png' # createGridWithCircle(nat5, circleInt, outName) # # texturePairs = [[17,77],[3,68],[3,17],[55,68]] # count =",
"vq import argparse import glob # We can specify these if need be.",
"i in range(len(subOuts)): dest = brodatz + \"cropRow\" + str(i) + \".png\" dests.append(dest)",
"= int(round(row - math.floor(W / 2))) initn = int(round(col - math.floor(W / 2)))",
"of creating them. def readInFeatureVectorsFromFile(dir): list = [line.rstrip('\\n') for line in open(dir)] list",
"I did not write this to have an exhaustive approach in mind, #",
"feature vectors instead of creating them. def readInFeatureVectorsFromFile(dir): list = [line.rstrip('\\n') for line",
"[] for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz + \"D\" + str(listOfBrodatzInts[i]) +",
"brodatz + \"outcrop1.png\") cropTexture(256, 256, 384, 384, pathsToTemp[1], brodatz + \"outcrop2.png\") cropsToConcat =",
"circleInt = 24 # outName = 'Nat5.png' # createGridWithCircle(nat5, circleInt, outName) # #",
"subOuts = [listOfRowOutputs[x:x + 2] for x in xrange(0, len(listOfRowOutputs), 2)] dests =",
"\"outcrop1.png\") cropTexture(256, 256, 384, 384, pathsToTemp[1], brodatz + \"outcrop2.png\") cropsToConcat = [brodatz +",
"brodatzCropOutput = brodatz + \"cropD\" + str(listOfBrodatzInts[i]) + \".png\" # 128x128 crops, in",
"if the user chose to print them. def deleteExistingSubResults(outputPath): for filename in os.listdir(outputPath):",
"= [listOfRowOutputs[x:x + howManyPerRow] for x in xrange(0,len(listOfRowOutputs), howManyPerRow)] dests = [] for",
"brodatz + outName, 0) # Destroy all sub crops (we can make this",
"import matplotlib.pyplot as pyplt import scipy.cluster.vq as vq import argparse import glob #",
"for item in row: floatitem = float(item) newRow.append(floatitem) newList.append(newRow) return newList # Print",
"specify these if need be. brodatz = \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut = \"D:\\\\ImageProcessing\\\\project\\\\concat.png\" # This",
"save our feature vectors for later def printFeatureVectors(outDir, featureVectors): f = open(outDir, 'w')",
"# Print the intermediate results before clustering occurs def printFeatureImages(featureImages, naming, printlocation): i",
"range(0, len(labels[0])): outputIntensity = (255/k)*labels[row][col] labels[row][col] = outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape)) else: pyplt.imsave(outdir, labels.reshape(img.shape))",
"= brodatz + \"D\" + str(listOfBrodatzInts[i]) + \".png\" brodatzCropOutput = brodatz + \"cropD\"",
"nRange = [] mRange.append(0) mRange.append(W-1) nRange.append(0) nRange.append(W-1) initm = int(round(row - math.floor(W /",
"cannot have a negative weighting value) def check_positive_float(n): float_n = float(n) if float_n",
"we want!) deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts, circleInt, outName): listOfRowOutputs = [] for i in",
"mask = Image.new('L', size, color=255) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + size, fill=0)",
"count = 0 # for pair in texturePairs: # outName = brodatz +",
"data def constructFeatureVectors(featureImages, img): featureVectors = [] height, width = img.shape for row",
"128x128 crops, in order to generate a 256x256 image cropTexture(256, 256, 384, 384,",
"test images #-------------------------------------------------------------------------- # Note that I did not write this to have",
"mRange.append(W-1) nRange.append(0) nRange.append(W-1) initm = int(round(row - math.floor(W / 2))) initn = int(round(col",
"+ str(pair[0]) + \".png\", brodatz + \"D\" + str(pair[1]) + \".png\"] cropTexture(256, 256,",
"range(0, len(featureVectors[0])): colMean = 0 for row in range(0, len(featureVectors)): colMean += featureVectors[row][col]",
"nRange[1]) - (width - 1)) nRange[1] -= diff windowHeight = mRange[1] - mRange[0]",
"thisImage in pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images, axis=axisType)) outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return",
"[] for i in range(len(subOuts)): dest = brodatz + \"cropRow\" + str(i) +",
"kmeans = clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels = kmeans.labels_ return labels # To clean up",
"textures were downloaded and cropped as new input images. #-------------------------------------------------------------------------- def cropTexture(x_offset, Y_offset,",
"0 for row in range(0, len(featureVectors)): colMean += featureVectors[row][col] colMean /= len(featureVectors) means.append(colMean)",
"= Image.new('L', size, color=255) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + size, fill=0) im",
"can be subtracted if specified by the implementation. def normalizeData(featureVectors, setMeanToZero, spatialWeight=1): means",
"256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + howManyPerRow] for x",
"output grey def printClassifiedImage(labels, k, img, outdir, greyOutput): if(greyOutput): labels = labels.reshape(img.shape) for",
"concatOut = \"D:\\\\ImageProcessing\\\\project\\\\concat.png\" # This is the function that checks boundaries when performing",
"col in range(width): featureVector = [] featureVector.append(row) featureVector.append(col) for featureImage in featureImages: featureVector.append(featureImage[row][col])",
"feature images if the user chose to print them. def deleteExistingSubResults(outputPath): for filename",
"+ nRange[1] > (width-1)): diff = ((initn + nRange[1]) - (width - 1))",
"384, pathsToTemp[0], brodatz + \"outcrop1.png\") cropTexture(256, 256, 384, 384, pathsToTemp[1], brodatz + \"outcrop2.png\")",
"384, pathsToTemp[1], brodatz + \"outcrop2.png\") cropsToConcat = [brodatz + \"outcrop1.png\", brodatz + \"outcrop2.png\"]",
"efficient, instead I just decided to # use the images that were in",
"= [] nRange = [] mRange.append(0) mRange.append(W-1) nRange.append(0) nRange.append(W-1) initm = int(round(row -",
"= kmeans.labels_ return labels # To clean up old filter and feature images",
"are looking to save our feature vectors for later def printFeatureVectors(outDir, featureVectors): f",
"= [line.rstrip('\\n') for line in open(dir)] list = [i.split() for i in list]",
"# Checks user input (i.e. cannot have a negative mask size value) def",
"range(len(listOfBrodatzInts)): brodatzCropInput = brodatz + \"D\" + str(listOfBrodatzInts[i]) + \".png\" brodatzCropOutput = brodatz",
"0) size = (128, 128) mask = Image.new('L', size, color=255) draw = ImageDraw.Draw(mask)",
"int_n # Checks user input (i.e. cannot have a negative weighting value) def",
"featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return featureVectors # An extra function if we are looking to",
"read in some feature vectors instead of creating them. def readInFeatureVectorsFromFile(dir): list =",
"((bg_w - img_w) / 2, (bg_h - img_h) / 2) background.paste(output, offset, img)",
"= 0 if(initm + mRange[1] > (height - 1)): diff = ((initm +",
"= ((initm + mRange[1]) - (height - 1)) mRange[1] -= diff if(initn +",
"format=\"png\") deleteCroppedImages() def createTexturePair(pair, outName): pathsToTemp = [brodatz + \"D\" + str(pair[0]) +",
"in featureVectors: for item in vector: f.write(str(item) + \" \") f.write(\"\\n\") f.close() #",
"# nat5 = [77,55,84,17] # circleInt = 24 # outName = 'Nat5.png' #",
"or filename.startswith(\"feature\")): os.remove(filename) # Checks user input (i.e. cannot have a negative mask",
"middle of the image! # nat5 = [77,55,84,17] # circleInt = 24 #",
"height, width, W): mRange = [] nRange = [] mRange.append(0) mRange.append(W-1) nRange.append(0) nRange.append(W-1)",
"# createGrid(grid4, outName, howManyPerRow) # #the last int is the circle in the",
"2))) initn = int(round(col - math.floor(W / 2))) if (initm < 0): mRange[1]",
"return newList # Print the intermediate results before clustering occurs def printFeatureImages(featureImages, naming,",
"intensity values imageToPrint = cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation + \"\\\\\" +",
"+ \"outcrop2.png\") cropsToConcat = [brodatz + \"outcrop1.png\", brodatz + \"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1)",
"in range(0, len(featureVectors[0])): colMean = 0 for row in range(0, len(featureVectors)): colMean +=",
"initm = 0 if (initn < 0): nRange[1] += initn initn = 0",
"also choose to make the output grey def printClassifiedImage(labels, k, img, outdir, greyOutput):",
"input (i.e. cannot have a negative weighting value) def check_positive_float(n): float_n = float(n)",
"function if we are looking to save our feature vectors for later def",
"# 128x128 crops, in order to generate a 512x512 image cropTexture(256, 256, 384,",
"order to generate a 256x256 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput)",
"112 images from the Brodatz album here # nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow",
"#-------------------------------------------------------------------------- # Create test images #-------------------------------------------------------------------------- # Note that I did not write",
"is negative\" % n) return int_n # Checks user input (i.e. cannot have",
"newList = [] for row in list: newRow = [] for item in",
"= cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg def createGrid(listOfBrodatzInts, outName, howManyPerRow): listOfRowOutputs = [] for",
"/ 2))) initn = int(round(col - math.floor(W / 2))) if (initm < 0):",
"= int(n) if int_n < 0: raise argparse.ArgumentTypeError(\"%s is negative\" % n) return",
"i+=1 # Print the final result, the user can also choose to make",
"/ 2))) if (initm < 0): mRange[1] += initm initm = 0 if",
"All of the functions below were left here to demonstrate how I went",
"copy[row][1] *= spatialWeight return copy # Create the feature vectors and add in",
"- (width - 1)) nRange[1] -= diff windowHeight = mRange[1] - mRange[0] windowWidth",
"int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn)) # Used to normalize data before clustering occurs. #",
"384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + 2] for x in",
"(bg_h - img_h) / 2) background.paste(output, offset, img) background.save(brodatz + outName, format=\"png\") deleteCroppedImages()",
"4 # outName = \"Nat16.png\" # createGrid(nat16, outName, howManyPerRow) # # grid4 =",
"means[col] for row in range(0, len(featureVectors)): copy[row][0] *= spatialWeight copy[row][1] *= spatialWeight return",
"# Destroy all sub crops (we can make this optional if we want!)",
"import math import sklearn.cluster as clstr import cv2 import numpy as np from",
"copy = vq.whiten(featureVectors) if (setMeanToZero): for row in range(0, len(featureVectors)): for col in",
"cannot have a negative mask size value) def check_positive_int(n): int_n = int(n) if",
"output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5)) output.paste(0, mask=mask) output.save(brodatz + 'circlecrop.png', transparency=0) img",
"# # grid4 = [3,68,17,77] # howManyPerRow = 2 # outName = \"grid4.png\"",
"\".png\" brodatzCropOutput = brodatz + \"cropD\" + str(listOfBrodatzInts[i]) + \".png\" # 128x128 crops,",
"for col in range(0, len(labels[0])): outputIntensity = (255/k)*labels[row][col] labels[row][col] = outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape))",
"(we can make this optional if we want!) deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts, circleInt, outName):",
"norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation + \"\\\\\" + naming + str(i) + \".png\", imageToPrint) i+=1",
"= [] for item in row: floatitem = float(item) newRow.append(floatitem) newList.append(newRow) return newList",
"+ 'circlecrop.png', transparency=0) img = Image.open(brodatz + 'circlecrop.png').convert(\"RGBA\") img_w, img_h = img.size background",
"we are looking to save our feature vectors for later def printFeatureVectors(outDir, featureVectors):",
"howManyPerRow = 4 # outName = \"Nat16.png\" # createGrid(nat16, outName, howManyPerRow) # #",
"+ outName, format=\"png\") deleteCroppedImages() def createTexturePair(pair, outName): pathsToTemp = [brodatz + \"D\" +",
"+ nRange[1]) - (width - 1)) nRange[1] -= diff windowHeight = mRange[1] -",
"os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType): images = [] for thisImage in pathsToImages: images.append(cv2.imread(thisImage,",
"col in range(2, len(featureVectors[0])): for row in range(0, len(featureVectors)): featureVectors[row][col] -= means[col] copy",
"input (i.e. cannot have a negative mask size value) def check_positive_int(n): int_n =",
"img): featureVectors = [] height, width = img.shape for row in range(height): for",
"row in range(0, len(featureVectors)): colMean += featureVectors[row][col] colMean /= len(featureVectors) means.append(colMean) for col",
"in vector: f.write(str(item) + \" \") f.write(\"\\n\") f.close() # If we want to",
"grey def printClassifiedImage(labels, k, img, outdir, greyOutput): if(greyOutput): labels = labels.reshape(img.shape) for row",
"= Image.open(inDir) crop = image.crop(box) crop.save(outDir, \"PNG\") def deleteCroppedImages(): for filename in glob.glob(brodatz",
"featureVector.append(row) featureVector.append(col) for featureImage in featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return featureVectors # An extra",
"float_n < 0: raise argparse.ArgumentTypeError(\"%s is negative \" % n) return float_n #--------------------------------------------------------------------------",
"order to generate a 512x512 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput)",
"printlocation): i =0 for image in featureImages: # Normalize to intensity values imageToPrint",
"/ 2, (bg_h - img_h) / 2) background.paste(output, offset, img) background.save(brodatz + outName,",
"cv2.imwrite(outdir, labels.reshape(img.shape)) else: pyplt.imsave(outdir, labels.reshape(img.shape)) # Call the k means algorithm for classification",
"height, width = img.shape for row in range(height): for col in range(width): featureVector",
"in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz + \"D\" + str(listOfBrodatzInts[i]) + \".png\" brodatzCropOutput =",
"2 # outName = \"grid4.png\" # createGrid(grid4, outName, howManyPerRow) # #the last int",
"if float_n < 0: raise argparse.ArgumentTypeError(\"%s is negative \" % n) return float_n",
"about # cropping the input images. I left them here, in the case",
"\".png\"] cropTexture(256, 256, 384, 384, pathsToTemp[0], brodatz + \"outcrop1.png\") cropTexture(256, 256, 384, 384,",
"in row and column data def constructFeatureVectors(featureImages, img): featureVectors = [] height, width",
"height) image = Image.open(inDir) crop = image.crop(box) crop.save(outDir, \"PNG\") def deleteCroppedImages(): for filename",
"print them. def deleteExistingSubResults(outputPath): for filename in os.listdir(outputPath): if (filename.startswith(\"filter\") or filename.startswith(\"feature\")): os.remove(filename)",
"int(round(col - math.floor(W / 2))) if (initm < 0): mRange[1] += initm initm",
"the feature vectors and add in row and column data def constructFeatureVectors(featureImages, img):",
"float(item) newRow.append(floatitem) newList.append(newRow) return newList # Print the intermediate results before clustering occurs",
"+ str(listOfBrodatzInts[i]) + \".png\" # 128x128 crops, in order to generate a 512x512",
"crops, in order to generate a 512x512 image cropTexture(256, 256, 384, 384, brodatzCropInput,",
"> (width-1)): diff = ((initn + nRange[1]) - (width - 1)) nRange[1] -=",
"# Used to normalize data before clustering occurs. # Whiten sets the variance",
"check_positive_int(n): int_n = int(n) if int_n < 0: raise argparse.ArgumentTypeError(\"%s is negative\" %",
"were in the papers already. # # We can use any of the",
"# I would have made it a little more efficient, instead I just",
"1 (unit variance), # spatial weighting also takes place here. # The mean",
"def createGrid(listOfBrodatzInts, outName, howManyPerRow): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput =",
"= ((initn + nRange[1]) - (width - 1)) nRange[1] -= diff windowHeight =",
"createGrid(nat16, outName, howManyPerRow) # # grid4 = [3,68,17,77] # howManyPerRow = 2 #",
"and column data def constructFeatureVectors(featureImages, img): featureVectors = [] height, width = img.shape",
"0.5)) output.paste(0, mask=mask) output.save(brodatz + 'circlecrop.png', transparency=0) img = Image.open(brodatz + 'circlecrop.png').convert(\"RGBA\") img_w,",
"list: newRow = [] for item in row: floatitem = float(item) newRow.append(floatitem) newList.append(newRow)",
"This is the function that checks boundaries when performing spatial convolution. def getRanges_for_window_with_adjust(row,",
"(width - 1)) nRange[1] -= diff windowHeight = mRange[1] - mRange[0] windowWidth =",
"output.paste(0, mask=mask) output.save(brodatz + 'circlecrop.png', transparency=0) img = Image.open(brodatz + 'circlecrop.png').convert(\"RGBA\") img_w, img_h",
"of the 112 images from the Brodatz album here # nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54]",
"that were in the papers already. # # We can use any of",
"+ \".png\" brodatzCropOutput = brodatz + \"cropD\" + str(listOfBrodatzInts[i]) + \".png\" # 128x128",
"Note that I did not write this to have an exhaustive approach in",
"= \"D:\\\\ImageProcessing\\\\project\\\\concat.png\" # This is the function that checks boundaries when performing spatial",
"if (initm < 0): mRange[1] += initm initm = 0 if (initn <",
"return outimg def createGrid(listOfBrodatzInts, outName, howManyPerRow): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)):",
"to generate a 256x256 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts",
"brodatzCropInput = brodatz + \"D\" + str(listOfBrodatzInts[i]) + \".png\" brodatzCropOutput = brodatz +",
"width, height, inDir, outDir): box = (x_offset, Y_offset, width, height) image = Image.open(inDir)",
"in list] newList = [] for row in list: newRow = [] for",
"other texture. If I did so, # I would have made it a",
"for pair in texturePairs: # outName = brodatz + \"pair\" + str(count) +",
"scipy.cluster.vq as vq import argparse import glob # We can specify these if",
"input images. #-------------------------------------------------------------------------- def cropTexture(x_offset, Y_offset, width, height, inDir, outDir): box = (x_offset,",
"+ str(pair[1]) + \".png\"] cropTexture(256, 256, 384, 384, pathsToTemp[0], brodatz + \"outcrop1.png\") cropTexture(256,",
"= [3,68,17,77] # howManyPerRow = 2 # outName = \"grid4.png\" # createGrid(grid4, outName,",
"nRange[1] -= diff windowHeight = mRange[1] - mRange[0] windowWidth = nRange[1] - nRange[0]",
"all of the textures to every other texture. If I did so, #",
"spatialWeight copy[row][1] *= spatialWeight return copy # Create the feature vectors and add",
"outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape)) else: pyplt.imsave(outdir, labels.reshape(img.shape)) # Call the k means algorithm for",
"initn initn = 0 if(initm + mRange[1] > (height - 1)): diff =",
"subtracted if specified by the implementation. def normalizeData(featureVectors, setMeanToZero, spatialWeight=1): means = []",
"crops, in order to generate a 256x256 image cropTexture(256, 256, 384, 384, brodatzCropInput,",
"Print the final result, the user can also choose to make the output",
"img.size background = Image.open(brodatz + \"Nat5crop.png\") bg_w, bg_h = background.size offset = ((bg_w",
"nRange.append(0) nRange.append(W-1) initm = int(round(row - math.floor(W / 2))) initn = int(round(col -",
"background.paste(output, offset, img) background.save(brodatz + outName, format=\"png\") deleteCroppedImages() def createTexturePair(pair, outName): pathsToTemp =",
"images. #-------------------------------------------------------------------------- def cropTexture(x_offset, Y_offset, width, height, inDir, outDir): box = (x_offset, Y_offset,",
"howManyPerRow): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz + \"D\"",
"last int is the circle in the middle of the image! # nat5",
"them here, in the case that Brodatz # textures were downloaded and cropped",
"128) mask = Image.new('L', size, color=255) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + size,",
"colMean /= len(featureVectors) means.append(colMean) for col in range(2, len(featureVectors[0])): for row in range(0,",
"normalizeData(featureVectors, setMeanToZero, spatialWeight=1): means = [] for col in range(0, len(featureVectors[0])): colMean =",
"filename in os.listdir(outputPath): if (filename.startswith(\"filter\") or filename.startswith(\"feature\")): os.remove(filename) # Checks user input (i.e.",
"as new input images. #-------------------------------------------------------------------------- def cropTexture(x_offset, Y_offset, width, height, inDir, outDir): box",
"\"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1) deleteCroppedImages() #-------------------------------------------------------------------------- # Create test images #-------------------------------------------------------------------------- # Note",
"want!) deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts, circleInt, outName): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)):",
"384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + howManyPerRow] for x in xrange(0,len(listOfRowOutputs),",
"vectors and add in row and column data def constructFeatureVectors(featureImages, img): featureVectors =",
"= ImageOps.fit(im, mask.size, centering=(0.5, 0.5)) output.paste(0, mask=mask) output.save(brodatz + 'circlecrop.png', transparency=0) img =",
"(i.e. cannot have a negative mask size value) def check_positive_int(n): int_n = int(n)",
"# # texturePairs = [[17,77],[3,68],[3,17],[55,68]] # count = 0 # for pair in",
"int(round(windowWidth)), int(round(initm)), int(round(initn)) # Used to normalize data before clustering occurs. # Whiten",
"diff if(initn + nRange[1] > (width-1)): diff = ((initn + nRange[1]) - (width",
"images = [] for thisImage in pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images, axis=axisType)) outimg",
"# howManyPerRow = 4 # outName = \"Nat16.png\" # createGrid(nat16, outName, howManyPerRow) #",
"float_n #-------------------------------------------------------------------------- # All of the functions below were left here to demonstrate",
"math.floor(W / 2))) initn = int(round(col - math.floor(W / 2))) if (initm <",
"is negative \" % n) return float_n #-------------------------------------------------------------------------- # All of the functions",
"howManyPerRow) # #the last int is the circle in the middle of the",
"when performing spatial convolution. def getRanges_for_window_with_adjust(row, col, height, width, W): mRange = []",
"+ naming + str(i) + \".png\", imageToPrint) i+=1 # Print the final result,",
"cv2.imwrite(printlocation + \"\\\\\" + naming + str(i) + \".png\", imageToPrint) i+=1 # Print",
"= 0 if (initn < 0): nRange[1] += initn initn = 0 if(initm",
"negative weighting value) def check_positive_float(n): float_n = float(n) if float_n < 0: raise",
"= [77,55,84,17] # circleInt = 24 # outName = 'Nat5.png' # createGridWithCircle(nat5, circleInt,",
"# howManyPerRow = 2 # outName = \"grid4.png\" # createGrid(grid4, outName, howManyPerRow) #",
"*= spatialWeight return copy # Create the feature vectors and add in row",
"[] for row in list: newRow = [] for item in row: floatitem",
"have made it a little more efficient, instead I just decided to #",
"that checks boundaries when performing spatial convolution. def getRanges_for_window_with_adjust(row, col, height, width, W):",
"24 # outName = 'Nat5.png' # createGridWithCircle(nat5, circleInt, outName) # # texturePairs =",
"col in range(0, len(featureVectors[0])): copy[row][col] -= means[col] for row in range(0, len(featureVectors)): copy[row][0]",
"size, color=255) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + size, fill=0) im = Image.open(brodatz",
"in row: floatitem = float(item) newRow.append(floatitem) newList.append(newRow) return newList # Print the intermediate",
"dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + \"cropRow\" + str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz +",
"# If we want to read in some feature vectors instead of creating",
"# Normalize to intensity values imageToPrint = cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation",
"new input images. #-------------------------------------------------------------------------- def cropTexture(x_offset, Y_offset, width, height, inDir, outDir): box =",
"def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType): images = [] for thisImage in pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE))",
"raise argparse.ArgumentTypeError(\"%s is negative \" % n) return float_n #-------------------------------------------------------------------------- # All of",
"str(pair[0]) + \".png\", brodatz + \"D\" + str(pair[1]) + \".png\"] cropTexture(256, 256, 384,",
"(initn < 0): nRange[1] += initn initn = 0 if(initm + mRange[1] >",
"pair all of the textures to every other texture. If I did so,",
"cropTexture(256, 256, 384, 384, pathsToTemp[1], brodatz + \"outcrop2.png\") cropsToConcat = [brodatz + \"outcrop1.png\",",
"outName) # # texturePairs = [[17,77],[3,68],[3,17],[55,68]] # count = 0 # for pair",
"case that Brodatz # textures were downloaded and cropped as new input images.",
"negative \" % n) return float_n #-------------------------------------------------------------------------- # All of the functions below",
"+ \"cropRow\" + str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0) #",
"# Whiten sets the variance to be 1 (unit variance), # spatial weighting",
"ImageDraw import os, glob import matplotlib.pyplot as pyplt import scipy.cluster.vq as vq import",
"i =0 for image in featureImages: # Normalize to intensity values imageToPrint =",
"\"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut = \"D:\\\\ImageProcessing\\\\project\\\\concat.png\" # This is the function that checks boundaries when",
"in order to generate a 512x512 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput)",
"+ \"outcrop1.png\", brodatz + \"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1) deleteCroppedImages() #-------------------------------------------------------------------------- # Create test",
"in the middle of the image! # nat5 = [77,55,84,17] # circleInt =",
"color=255) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + size, fill=0) im = Image.open(brodatz +",
"not write this to have an exhaustive approach in mind, # where I",
"spatialWeight=1): means = [] for col in range(0, len(featureVectors[0])): colMean = 0 for",
"for col in range(width): featureVector = [] featureVector.append(row) featureVector.append(col) for featureImage in featureImages:",
"1)): diff = ((initm + mRange[1]) - (height - 1)) mRange[1] -= diff",
"a 512x512 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x",
"weighting value) def check_positive_float(n): float_n = float(n) if float_n < 0: raise argparse.ArgumentTypeError(\"%s",
"argparse.ArgumentTypeError(\"%s is negative \" % n) return float_n #-------------------------------------------------------------------------- # All of the",
"= [brodatz + \"D\" + str(pair[0]) + \".png\", brodatz + \"D\" + str(pair[1])",
"bg_h = background.size offset = ((bg_w - img_w) / 2, (bg_h - img_h)",
"\" \") f.write(\"\\n\") f.close() # If we want to read in some feature",
"variance), # spatial weighting also takes place here. # The mean can be",
"featureVectors # An extra function if we are looking to save our feature",
"f = open(outDir, 'w') for vector in featureVectors: for item in vector: f.write(str(item)",
"offset = ((bg_w - img_w) / 2, (bg_h - img_h) / 2) background.paste(output,",
"ImageDraw.Draw(mask) draw.ellipse((0, 0) + size, fill=0) im = Image.open(brodatz + \"D\" + str(circleInt)",
"Checks user input (i.e. cannot have a negative mask size value) def check_positive_int(n):",
"= int(round(col - math.floor(W / 2))) if (initm < 0): mRange[1] += initm",
"copy # Create the feature vectors and add in row and column data",
"# To clean up old filter and feature images if the user chose",
"= [] for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz + \"D\" + str(listOfBrodatzInts[i])",
"# Create test images #-------------------------------------------------------------------------- # Note that I did not write this",
"for filename in glob.glob(brodatz + \"*crop*\"): os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType): images =",
"384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + 2] for x in xrange(0,",
"= [] for i in range(len(subOuts)): dest = brodatz + \"cropRow\" + str(i)",
"did so, # I would have made it a little more efficient, instead",
"256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + 2] for x",
"nRange.append(W-1) initm = int(round(row - math.floor(W / 2))) initn = int(round(col - math.floor(W",
"initm initm = 0 if (initn < 0): nRange[1] += initn initn =",
"axis=axisType)) outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg def createGrid(listOfBrodatzInts, outName, howManyPerRow): listOfRowOutputs =",
"outdir, greyOutput): if(greyOutput): labels = labels.reshape(img.shape) for row in range(0, len(labels)): for col",
"outName, howManyPerRow): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz +",
"% n) return int_n # Checks user input (i.e. cannot have a negative",
"= mRange[1] - mRange[0] windowWidth = nRange[1] - nRange[0] return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)),",
"featureVectors.append(featureVector) return featureVectors # An extra function if we are looking to save",
"k, img, outdir, greyOutput): if(greyOutput): labels = labels.reshape(img.shape) for row in range(0, len(labels)):",
"the user can also choose to make the output grey def printClassifiedImage(labels, k,",
"1)) mRange[1] -= diff if(initn + nRange[1] > (width-1)): diff = ((initn +",
"return copy # Create the feature vectors and add in row and column",
"= ((bg_w - img_w) / 2, (bg_h - img_h) / 2) background.paste(output, offset,",
"feature vectors for later def printFeatureVectors(outDir, featureVectors): f = open(outDir, 'w') for vector",
"= 4 # outName = \"Nat16.png\" # createGrid(nat16, outName, howManyPerRow) # # grid4",
"k): kmeans = clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels = kmeans.labels_ return labels # To clean",
"math import sklearn.cluster as clstr import cv2 import numpy as np from PIL",
"in glob.glob(brodatz + \"*crop*\"): os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType): images = [] for",
"import os, glob import matplotlib.pyplot as pyplt import scipy.cluster.vq as vq import argparse",
"the 112 images from the Brodatz album here # nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] #",
"f.close() # If we want to read in some feature vectors instead of",
"row in range(0, len(labels)): for col in range(0, len(labels[0])): outputIntensity = (255/k)*labels[row][col] labels[row][col]",
"in range(0, len(featureVectors[0])): copy[row][col] -= means[col] for row in range(0, len(featureVectors)): copy[row][0] *=",
"(128, 128) mask = Image.new('L', size, color=255) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) +",
"deleteExistingSubResults(outputPath): for filename in os.listdir(outputPath): if (filename.startswith(\"filter\") or filename.startswith(\"feature\")): os.remove(filename) # Checks user",
"deleteCroppedImages() def createTexturePair(pair, outName): pathsToTemp = [brodatz + \"D\" + str(pair[0]) + \".png\",",
"the case that Brodatz # textures were downloaded and cropped as new input",
"np from PIL import Image, ImageOps, ImageDraw import os, glob import matplotlib.pyplot as",
"+ \"outcrop1.png\") cropTexture(256, 256, 384, 384, pathsToTemp[1], brodatz + \"outcrop2.png\") cropsToConcat = [brodatz",
"# circleInt = 24 # outName = 'Nat5.png' # createGridWithCircle(nat5, circleInt, outName) #",
"brodatz + \"D\" + str(listOfBrodatzInts[i]) + \".png\" brodatzCropOutput = brodatz + \"cropD\" +",
"\".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0) # Destroy all sub crops (we",
"- 1)): diff = ((initm + mRange[1]) - (height - 1)) mRange[1] -=",
"# outName = \"Nat16.png\" # createGrid(nat16, outName, howManyPerRow) # # grid4 = [3,68,17,77]",
"def check_positive_int(n): int_n = int(n) if int_n < 0: raise argparse.ArgumentTypeError(\"%s is negative\"",
"dest = brodatz + \"cropRow\" + str(i) + \".png\" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz +",
"vq.whiten(featureVectors) if (setMeanToZero): for row in range(0, len(featureVectors)): for col in range(0, len(featureVectors[0])):",
"# Note that I did not write this to have an exhaustive approach",
"+ str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + \"Nat5crop.png\", 0) size = (128,",
"cropsToConcat = [brodatz + \"outcrop1.png\", brodatz + \"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1) deleteCroppedImages() #--------------------------------------------------------------------------",
"brodatz + \"outcrop2.png\") cropsToConcat = [brodatz + \"outcrop1.png\", brodatz + \"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName,",
"# outName = 'Nat5.png' # createGridWithCircle(nat5, circleInt, outName) # # texturePairs = [[17,77],[3,68],[3,17],[55,68]]",
"crops (we can make this optional if we want!) deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts, circleInt,",
"featureVector = [] featureVector.append(row) featureVector.append(col) for featureImage in featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return featureVectors",
"row: floatitem = float(item) newRow.append(floatitem) newList.append(newRow) return newList # Print the intermediate results",
"+ mRange[1]) - (height - 1)) mRange[1] -= diff if(initn + nRange[1] >",
"os.listdir(outputPath): if (filename.startswith(\"filter\") or filename.startswith(\"feature\")): os.remove(filename) # Checks user input (i.e. cannot have",
"output.save(brodatz + 'circlecrop.png', transparency=0) img = Image.open(brodatz + 'circlecrop.png').convert(\"RGBA\") img_w, img_h = img.size",
"img_w, img_h = img.size background = Image.open(brodatz + \"Nat5crop.png\") bg_w, bg_h = background.size",
"concatentationOfBrodatzTexturesIntoRows(dests, brodatz + \"Nat5crop.png\", 0) size = (128, 128) mask = Image.new('L', size,",
"featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return featureVectors # An extra function if we are looking",
"= float(item) newRow.append(floatitem) newList.append(newRow) return newList # Print the intermediate results before clustering",
"as vq import argparse import glob # We can specify these if need",
"\"cropD\" + str(listOfBrodatzInts[i]) + \".png\" # 128x128 crops, in order to generate a",
"+ str(listOfBrodatzInts[i]) + \".png\" # 128x128 crops, in order to generate a 256x256",
"[brodatz + \"D\" + str(pair[0]) + \".png\", brodatz + \"D\" + str(pair[1]) +",
"+ \"Nat5crop.png\", 0) size = (128, 128) mask = Image.new('L', size, color=255) draw",
"it a little more efficient, instead I just decided to # use the",
"= \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut = \"D:\\\\ImageProcessing\\\\project\\\\concat.png\" # This is the function that checks boundaries",
"size value) def check_positive_int(n): int_n = int(n) if int_n < 0: raise argparse.ArgumentTypeError(\"%s",
"int is the circle in the middle of the image! # nat5 =",
"from PIL import Image, ImageOps, ImageDraw import os, glob import matplotlib.pyplot as pyplt",
"row in range(0, len(featureVectors)): copy[row][0] *= spatialWeight copy[row][1] *= spatialWeight return copy #",
"mRange[1] - mRange[0] windowWidth = nRange[1] - nRange[0] return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn))",
"feature vectors and add in row and column data def constructFeatureVectors(featureImages, img): featureVectors",
"+ str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0) # Destroy all",
"filename.startswith(\"feature\")): os.remove(filename) # Checks user input (i.e. cannot have a negative mask size",
"in some feature vectors instead of creating them. def readInFeatureVectorsFromFile(dir): list = [line.rstrip('\\n')",
"[listOfRowOutputs[x:x + howManyPerRow] for x in xrange(0,len(listOfRowOutputs), howManyPerRow)] dests = [] for i",
"str(pair[1]) + \".png\"] cropTexture(256, 256, 384, 384, pathsToTemp[0], brodatz + \"outcrop1.png\") cropTexture(256, 256,",
"the circle in the middle of the image! # nat5 = [77,55,84,17] #",
"+ \"D\" + str(pair[0]) + \".png\", brodatz + \"D\" + str(pair[1]) + \".png\"]",
"clean up old filter and feature images if the user chose to print",
"+ \".png\"] cropTexture(256, 256, 384, 384, pathsToTemp[0], brodatz + \"outcrop1.png\") cropTexture(256, 256, 384,",
"pyplt.imsave(outdir, labels.reshape(img.shape)) # Call the k means algorithm for classification def clusterFeatureVectors(featureVectors, k):",
"# outName = \"grid4.png\" # createGrid(grid4, outName, howManyPerRow) # #the last int is",
"outName = brodatz + \"pair\" + str(count) + \".png\" # createTexturePair(pair, outName) #",
"that I did not write this to have an exhaustive approach in mind,",
"in xrange(0, len(listOfRowOutputs), 2)] dests = [] for i in range(len(subOuts)): dest =",
"= [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow = 4 # outName = \"Nat16.png\" # createGrid(nat16, outName,",
"return featureVectors # An extra function if we are looking to save our",
"Image.open(inDir) crop = image.crop(box) crop.save(outDir, \"PNG\") def deleteCroppedImages(): for filename in glob.glob(brodatz +",
"brodatz = \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut = \"D:\\\\ImageProcessing\\\\project\\\\concat.png\" # This is the function that checks",
"#-------------------------------------------------------------------------- # Note that I did not write this to have an exhaustive",
"def cropTexture(x_offset, Y_offset, width, height, inDir, outDir): box = (x_offset, Y_offset, width, height)",
"def clusterFeatureVectors(featureVectors, k): kmeans = clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels = kmeans.labels_ return labels #",
"+ mRange[1] > (height - 1)): diff = ((initm + mRange[1]) - (height",
"be 1 (unit variance), # spatial weighting also takes place here. # The",
"printFeatureImages(featureImages, naming, printlocation): i =0 for image in featureImages: # Normalize to intensity",
"concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType): images = [] for thisImage in pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir,",
"the functions below were left here to demonstrate how I went about #",
"brodatz + \"cropRow\" + str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + \"Nat5crop.png\", 0)",
"a little more efficient, instead I just decided to # use the images",
"brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + 2] for x in xrange(0, len(listOfRowOutputs), 2)]",
"+ str(i) + \".png\", imageToPrint) i+=1 # Print the final result, the user",
"final result, the user can also choose to make the output grey def",
"float(n) if float_n < 0: raise argparse.ArgumentTypeError(\"%s is negative \" % n) return",
"pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images, axis=axisType)) outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg def",
"- img_w) / 2, (bg_h - img_h) / 2) background.paste(output, offset, img) background.save(brodatz",
"naming, printlocation): i =0 for image in featureImages: # Normalize to intensity values",
"the implementation. def normalizeData(featureVectors, setMeanToZero, spatialWeight=1): means = [] for col in range(0,",
"can also choose to make the output grey def printClassifiedImage(labels, k, img, outdir,",
"def printClassifiedImage(labels, k, img, outdir, greyOutput): if(greyOutput): labels = labels.reshape(img.shape) for row in",
"I left them here, in the case that Brodatz # textures were downloaded",
"len(featureVectors[0])): copy[row][col] -= means[col] for row in range(0, len(featureVectors)): copy[row][0] *= spatialWeight copy[row][1]",
"left them here, in the case that Brodatz # textures were downloaded and",
"len(featureVectors) means.append(colMean) for col in range(2, len(featureVectors[0])): for row in range(0, len(featureVectors)): featureVectors[row][col]",
"\"D\" + str(pair[0]) + \".png\", brodatz + \"D\" + str(pair[1]) + \".png\"] cropTexture(256,",
"textures to every other texture. If I did so, # I would have",
"We can specify these if need be. brodatz = \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut = \"D:\\\\ImageProcessing\\\\project\\\\concat.png\"",
"demonstrate how I went about # cropping the input images. I left them",
"draw.ellipse((0, 0) + size, fill=0) im = Image.open(brodatz + \"D\" + str(circleInt) +",
"+ 'circlecrop.png').convert(\"RGBA\") img_w, img_h = img.size background = Image.open(brodatz + \"Nat5crop.png\") bg_w, bg_h",
"brodatz + \"pair\" + str(count) + \".png\" # createTexturePair(pair, outName) # count +=",
"creating them. def readInFeatureVectorsFromFile(dir): list = [line.rstrip('\\n') for line in open(dir)] list =",
"here. # The mean can be subtracted if specified by the implementation. def",
"*= spatialWeight copy[row][1] *= spatialWeight return copy # Create the feature vectors and",
"createGridWithCircle(nat5, circleInt, outName) # # texturePairs = [[17,77],[3,68],[3,17],[55,68]] # count = 0 #",
"\".png\" # 128x128 crops, in order to generate a 256x256 image cropTexture(256, 256,",
"= outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape)) else: pyplt.imsave(outdir, labels.reshape(img.shape)) # Call the k means algorithm",
"\"*crop*\"): os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType): images = [] for thisImage in pathsToImages:",
"int(round(initn)) # Used to normalize data before clustering occurs. # Whiten sets the",
"-= means[col] copy = vq.whiten(featureVectors) if (setMeanToZero): for row in range(0, len(featureVectors)): for",
"= (128, 128) mask = Image.new('L', size, color=255) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0)",
"our feature vectors for later def printFeatureVectors(outDir, featureVectors): f = open(outDir, 'w') for",
"initm = int(round(row - math.floor(W / 2))) initn = int(round(col - math.floor(W /",
"a negative weighting value) def check_positive_float(n): float_n = float(n) if float_n < 0:",
"def readInFeatureVectorsFromFile(dir): list = [line.rstrip('\\n') for line in open(dir)] list = [i.split() for",
"384, 384, pathsToTemp[1], brodatz + \"outcrop2.png\") cropsToConcat = [brodatz + \"outcrop1.png\", brodatz +",
"in list: newRow = [] for item in row: floatitem = float(item) newRow.append(floatitem)",
"\"D\" + str(listOfBrodatzInts[i]) + \".png\" brodatzCropOutput = brodatz + \"cropD\" + str(listOfBrodatzInts[i]) +",
"box = (x_offset, Y_offset, width, height) image = Image.open(inDir) crop = image.crop(box) crop.save(outDir,",
"i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz + \"D\" + str(listOfBrodatzInts[i]) + \".png\" brodatzCropOutput",
"all sub crops (we can make this optional if we want!) deleteCroppedImages() def",
"draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + size, fill=0) im = Image.open(brodatz + \"D\"",
"a negative mask size value) def check_positive_int(n): int_n = int(n) if int_n <",
"to # use the images that were in the papers already. # #",
"glob import matplotlib.pyplot as pyplt import scipy.cluster.vq as vq import argparse import glob",
"diff windowHeight = mRange[1] - mRange[0] windowWidth = nRange[1] - nRange[0] return int(round(windowHeight)),",
"+ \"*crop*\"): os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType): images = [] for thisImage in",
"range(0, len(featureVectors[0])): copy[row][col] -= means[col] for row in range(0, len(featureVectors)): copy[row][0] *= spatialWeight",
"'circlecrop.png', transparency=0) img = Image.open(brodatz + 'circlecrop.png').convert(\"RGBA\") img_w, img_h = img.size background =",
"> (height - 1)): diff = ((initm + mRange[1]) - (height - 1))",
"for row in range(0, len(featureVectors)): for col in range(0, len(featureVectors[0])): copy[row][col] -= means[col]",
"in range(len(subOuts)): dest = brodatz + \"cropRow\" + str(i) + \".png\" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i],",
"= nRange[1] - nRange[0] return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn)) # Used to normalize",
"in featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return featureVectors # An extra function if we are",
"in range(0, len(featureVectors)): copy[row][0] *= spatialWeight copy[row][1] *= spatialWeight return copy # Create",
"+= initm initm = 0 if (initn < 0): nRange[1] += initn initn",
"optional if we want!) deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts, circleInt, outName): listOfRowOutputs = [] for",
"copy[row][0] *= spatialWeight copy[row][1] *= spatialWeight return copy # Create the feature vectors",
"did not write this to have an exhaustive approach in mind, # where",
"where I pair all of the textures to every other texture. If I",
"+ \"D\" + str(listOfBrodatzInts[i]) + \".png\" brodatzCropOutput = brodatz + \"cropD\" + str(listOfBrodatzInts[i])",
"newList.append(newRow) return newList # Print the intermediate results before clustering occurs def printFeatureImages(featureImages,",
"mask=mask) output.save(brodatz + 'circlecrop.png', transparency=0) img = Image.open(brodatz + 'circlecrop.png').convert(\"RGBA\") img_w, img_h =",
"as pyplt import scipy.cluster.vq as vq import argparse import glob # We can",
"vector: f.write(str(item) + \" \") f.write(\"\\n\") f.close() # If we want to read",
"takes place here. # The mean can be subtracted if specified by the",
"# We can specify these if need be. brodatz = \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut =",
"for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz + \"D\" + str(listOfBrodatzInts[i]) + \".png\"",
"result, the user can also choose to make the output grey def printClassifiedImage(labels,",
"the output grey def printClassifiedImage(labels, k, img, outdir, greyOutput): if(greyOutput): labels = labels.reshape(img.shape)",
"means[col] copy = vq.whiten(featureVectors) if (setMeanToZero): for row in range(0, len(featureVectors)): for col",
"functions below were left here to demonstrate how I went about # cropping",
"% n) return float_n #-------------------------------------------------------------------------- # All of the functions below were left",
"(setMeanToZero): for row in range(0, len(featureVectors)): for col in range(0, len(featureVectors[0])): copy[row][col] -=",
"floatitem = float(item) newRow.append(floatitem) newList.append(newRow) return newList # Print the intermediate results before",
"newRow = [] for item in row: floatitem = float(item) newRow.append(floatitem) newList.append(newRow) return",
"labels = kmeans.labels_ return labels # To clean up old filter and feature",
"to every other texture. If I did so, # I would have made",
"value) def check_positive_int(n): int_n = int(n) if int_n < 0: raise argparse.ArgumentTypeError(\"%s is",
"- 1)) mRange[1] -= diff if(initn + nRange[1] > (width-1)): diff = ((initn",
"make this optional if we want!) deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts, circleInt, outName): listOfRowOutputs =",
"+ \"D\" + str(circleInt) + \".png\") output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5)) output.paste(0,",
"in os.listdir(outputPath): if (filename.startswith(\"filter\") or filename.startswith(\"feature\")): os.remove(filename) # Checks user input (i.e. cannot",
"below were left here to demonstrate how I went about # cropping the",
"\"outcrop2.png\") cropsToConcat = [brodatz + \"outcrop1.png\", brodatz + \"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1) deleteCroppedImages()",
"= Image.open(brodatz + 'circlecrop.png').convert(\"RGBA\") img_w, img_h = img.size background = Image.open(brodatz + \"Nat5crop.png\")",
"decided to # use the images that were in the papers already. #",
"cv2 import numpy as np from PIL import Image, ImageOps, ImageDraw import os,",
"implementation. def normalizeData(featureVectors, setMeanToZero, spatialWeight=1): means = [] for col in range(0, len(featureVectors[0])):",
"[] featureVector.append(row) featureVector.append(col) for featureImage in featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return featureVectors # An",
"outName = \"grid4.png\" # createGrid(grid4, outName, howManyPerRow) # #the last int is the",
"+ str(circleInt) + \".png\") output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5)) output.paste(0, mask=mask) output.save(brodatz",
"len(listOfRowOutputs), 2)] dests = [] for i in range(len(subOuts)): dest = brodatz +",
"for row in range(0, len(featureVectors)): featureVectors[row][col] -= means[col] copy = vq.whiten(featureVectors) if (setMeanToZero):",
"in pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images, axis=axisType)) outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg",
"for row in range(0, len(labels)): for col in range(0, len(labels[0])): outputIntensity = (255/k)*labels[row][col]",
"before clustering occurs def printFeatureImages(featureImages, naming, printlocation): i =0 for image in featureImages:",
"clusterFeatureVectors(featureVectors, k): kmeans = clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels = kmeans.labels_ return labels # To",
"int(round(row - math.floor(W / 2))) initn = int(round(col - math.floor(W / 2))) if",
"= background.size offset = ((bg_w - img_w) / 2, (bg_h - img_h) /",
"boundaries when performing spatial convolution. def getRanges_for_window_with_adjust(row, col, height, width, W): mRange =",
"#-------------------------------------------------------------------------- # All of the functions below were left here to demonstrate how",
"featureVectors): f = open(outDir, 'w') for vector in featureVectors: for item in vector:",
"as clstr import cv2 import numpy as np from PIL import Image, ImageOps,",
"featureVectors = [] height, width = img.shape for row in range(height): for col",
"negative\" % n) return int_n # Checks user input (i.e. cannot have a",
"range(2, len(featureVectors[0])): for row in range(0, len(featureVectors)): featureVectors[row][col] -= means[col] copy = vq.whiten(featureVectors)",
"occurs def printFeatureImages(featureImages, naming, printlocation): i =0 for image in featureImages: # Normalize",
"str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0) # Destroy all sub",
"#the last int is the circle in the middle of the image! #",
"def createGridWithCircle(listOfBrodatzInts, circleInt, outName): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput =",
"brodatz + \"D\" + str(pair[1]) + \".png\"] cropTexture(256, 256, 384, 384, pathsToTemp[0], brodatz",
"= (255/k)*labels[row][col] labels[row][col] = outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape)) else: pyplt.imsave(outdir, labels.reshape(img.shape)) # Call the",
"Y_offset, width, height) image = Image.open(inDir) crop = image.crop(box) crop.save(outDir, \"PNG\") def deleteCroppedImages():",
"filename in glob.glob(brodatz + \"*crop*\"): os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType): images = []",
"outName, 0) # Destroy all sub crops (we can make this optional if",
"constructFeatureVectors(featureImages, img): featureVectors = [] height, width = img.shape for row in range(height):",
"im = Image.open(brodatz + \"D\" + str(circleInt) + \".png\") output = ImageOps.fit(im, mask.size,",
"listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + 2] for x in xrange(0, len(listOfRowOutputs), 2)] dests",
"for line in open(dir)] list = [i.split() for i in list] newList =",
"2)] dests = [] for i in range(len(subOuts)): dest = brodatz + \"cropRow\"",
"cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation + \"\\\\\" + naming + str(i) +",
"here, in the case that Brodatz # textures were downloaded and cropped as",
"= \"grid4.png\" # createGrid(grid4, outName, howManyPerRow) # #the last int is the circle",
"import glob # We can specify these if need be. brodatz = \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\"",
"in texturePairs: # outName = brodatz + \"pair\" + str(count) + \".png\" #",
"/ 2) background.paste(output, offset, img) background.save(brodatz + outName, format=\"png\") deleteCroppedImages() def createTexturePair(pair, outName):",
"Image.new('L', size, color=255) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + size, fill=0) im =",
"str(listOfBrodatzInts[i]) + \".png\" # 128x128 crops, in order to generate a 512x512 image",
"# An extra function if we are looking to save our feature vectors",
"Whiten sets the variance to be 1 (unit variance), # spatial weighting also",
"\"cropRow\" + str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + \"Nat5crop.png\", 0) size =",
"Image.open(brodatz + \"Nat5crop.png\") bg_w, bg_h = background.size offset = ((bg_w - img_w) /",
"results before clustering occurs def printFeatureImages(featureImages, naming, printlocation): i =0 for image in",
"in range(height): for col in range(width): featureVector = [] featureVector.append(row) featureVector.append(col) for featureImage",
"image! # nat5 = [77,55,84,17] # circleInt = 24 # outName = 'Nat5.png'",
"-= means[col] for row in range(0, len(featureVectors)): copy[row][0] *= spatialWeight copy[row][1] *= spatialWeight",
"- math.floor(W / 2))) initn = int(round(col - math.floor(W / 2))) if (initm",
"128x128 crops, in order to generate a 512x512 image cropTexture(256, 256, 384, 384,",
"more efficient, instead I just decided to # use the images that were",
"circleInt, outName): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz +",
"+ \".png\") output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5)) output.paste(0, mask=mask) output.save(brodatz + 'circlecrop.png',",
"col in range(0, len(featureVectors[0])): colMean = 0 for row in range(0, len(featureVectors)): colMean",
"'circlecrop.png').convert(\"RGBA\") img_w, img_h = img.size background = Image.open(brodatz + \"Nat5crop.png\") bg_w, bg_h =",
"so, # I would have made it a little more efficient, instead I",
"for col in range(0, len(featureVectors[0])): colMean = 0 for row in range(0, len(featureVectors)):",
"featureVectors[row][col] colMean /= len(featureVectors) means.append(colMean) for col in range(2, len(featureVectors[0])): for row in",
"# All of the functions below were left here to demonstrate how I",
"nat5 = [77,55,84,17] # circleInt = 24 # outName = 'Nat5.png' # createGridWithCircle(nat5,",
"naming + str(i) + \".png\", imageToPrint) i+=1 # Print the final result, the",
"a 256x256 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x",
"outName, howManyPerRow) # #the last int is the circle in the middle of",
"import scipy.cluster.vq as vq import argparse import glob # We can specify these",
"argparse.ArgumentTypeError(\"%s is negative\" % n) return int_n # Checks user input (i.e. cannot",
"how I went about # cropping the input images. I left them here,",
"+= initn initn = 0 if(initm + mRange[1] > (height - 1)): diff",
"256, 384, 384, pathsToTemp[0], brodatz + \"outcrop1.png\") cropTexture(256, 256, 384, 384, pathsToTemp[1], brodatz",
"(unit variance), # spatial weighting also takes place here. # The mean can",
"[] nRange = [] mRange.append(0) mRange.append(W-1) nRange.append(0) nRange.append(W-1) initm = int(round(row - math.floor(W",
"1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0) # Destroy all sub crops (we can",
"choose to make the output grey def printClassifiedImage(labels, k, img, outdir, greyOutput): if(greyOutput):",
"Image, ImageOps, ImageDraw import os, glob import matplotlib.pyplot as pyplt import scipy.cluster.vq as",
"variance to be 1 (unit variance), # spatial weighting also takes place here.",
"for x in xrange(0,len(listOfRowOutputs), howManyPerRow)] dests = [] for i in range(len(subOuts)): dest",
"+ \"Nat5crop.png\") bg_w, bg_h = background.size offset = ((bg_w - img_w) / 2,",
"in order to generate a 256x256 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput)",
"str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + \"Nat5crop.png\", 0) size = (128, 128)",
"for col in range(0, len(featureVectors[0])): copy[row][col] -= means[col] for row in range(0, len(featureVectors)):",
"circleInt, outName) # # texturePairs = [[17,77],[3,68],[3,17],[55,68]] # count = 0 # for",
"x in xrange(0,len(listOfRowOutputs), howManyPerRow)] dests = [] for i in range(len(subOuts)): dest =",
"We can use any of the 112 images from the Brodatz album here",
"featureVector.append(col) for featureImage in featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return featureVectors # An extra function",
"# Call the k means algorithm for classification def clusterFeatureVectors(featureVectors, k): kmeans =",
"for row in range(0, len(featureVectors)): colMean += featureVectors[row][col] colMean /= len(featureVectors) means.append(colMean) for",
"int(n) if int_n < 0: raise argparse.ArgumentTypeError(\"%s is negative\" % n) return int_n",
"glob.glob(brodatz + \"*crop*\"): os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType): images = [] for thisImage",
"< 0: raise argparse.ArgumentTypeError(\"%s is negative\" % n) return int_n # Checks user",
"write this to have an exhaustive approach in mind, # where I pair",
"papers already. # # We can use any of the 112 images from",
"img) background.save(brodatz + outName, format=\"png\") deleteCroppedImages() def createTexturePair(pair, outName): pathsToTemp = [brodatz +",
"mRange[0] windowWidth = nRange[1] - nRange[0] return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn)) # Used",
"copy[row][col] -= means[col] for row in range(0, len(featureVectors)): copy[row][0] *= spatialWeight copy[row][1] *=",
"and feature images if the user chose to print them. def deleteExistingSubResults(outputPath): for",
"(255/k)*labels[row][col] labels[row][col] = outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape)) else: pyplt.imsave(outdir, labels.reshape(img.shape)) # Call the k",
"to be 1 (unit variance), # spatial weighting also takes place here. #",
"0: raise argparse.ArgumentTypeError(\"%s is negative \" % n) return float_n #-------------------------------------------------------------------------- # All",
"width, height) image = Image.open(inDir) crop = image.crop(box) crop.save(outDir, \"PNG\") def deleteCroppedImages(): for",
"cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + 2] for",
"labels.reshape(img.shape) for row in range(0, len(labels)): for col in range(0, len(labels[0])): outputIntensity =",
"clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels = kmeans.labels_ return labels # To clean up old filter",
"clstr import cv2 import numpy as np from PIL import Image, ImageOps, ImageDraw",
"be subtracted if specified by the implementation. def normalizeData(featureVectors, setMeanToZero, spatialWeight=1): means =",
"#-------------------------------------------------------------------------- def cropTexture(x_offset, Y_offset, width, height, inDir, outDir): box = (x_offset, Y_offset, width,",
"images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images, axis=axisType)) outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg def createGrid(listOfBrodatzInts,",
"brodatz + \"cropRow\" + str(i) + \".png\" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + \"cropRow\" +",
"can make this optional if we want!) deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts, circleInt, outName): listOfRowOutputs",
"# The mean can be subtracted if specified by the implementation. def normalizeData(featureVectors,",
"generate a 256x256 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts =",
"Image.open(brodatz + \"D\" + str(circleInt) + \".png\") output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5))",
"vector in featureVectors: for item in vector: f.write(str(item) + \" \") f.write(\"\\n\") f.close()",
"nRange[0] return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn)) # Used to normalize data before clustering",
"if(greyOutput): labels = labels.reshape(img.shape) for row in range(0, len(labels)): for col in range(0,",
"specified by the implementation. def normalizeData(featureVectors, setMeanToZero, spatialWeight=1): means = [] for col",
"if (filename.startswith(\"filter\") or filename.startswith(\"feature\")): os.remove(filename) # Checks user input (i.e. cannot have a",
"brodatz + \"cropD\" + str(listOfBrodatzInts[i]) + \".png\" # 128x128 crops, in order to",
"spatialWeight return copy # Create the feature vectors and add in row and",
"imageToPrint) i+=1 # Print the final result, the user can also choose to",
"+ \"cropRow\" + str(i) + \".png\" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + \"cropRow\" + str(i)",
"+ \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0) # Destroy all sub crops",
"for featureImage in featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return featureVectors # An extra function if",
"newRow.append(floatitem) newList.append(newRow) return newList # Print the intermediate results before clustering occurs def",
"return float_n #-------------------------------------------------------------------------- # All of the functions below were left here to",
"# cropping the input images. I left them here, in the case that",
"mRange.append(0) mRange.append(W-1) nRange.append(0) nRange.append(W-1) initm = int(round(row - math.floor(W / 2))) initn =",
"+ \"cropRow\" + str(i) + \".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + \"Nat5crop.png\", 0) size",
"here # nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow = 4 # outName = \"Nat16.png\"",
"outdir, axisType): images = [] for thisImage in pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images,",
"range(0, len(featureVectors)): for col in range(0, len(featureVectors[0])): copy[row][col] -= means[col] for row in",
"cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images, axis=axisType)) outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg def createGrid(listOfBrodatzInts, outName,",
"featureVectors: for item in vector: f.write(str(item) + \" \") f.write(\"\\n\") f.close() # If",
"extra function if we are looking to save our feature vectors for later",
"the intermediate results before clustering occurs def printFeatureImages(featureImages, naming, printlocation): i =0 for",
"Image.open(brodatz + 'circlecrop.png').convert(\"RGBA\") img_w, img_h = img.size background = Image.open(brodatz + \"Nat5crop.png\") bg_w,",
"img_h = img.size background = Image.open(brodatz + \"Nat5crop.png\") bg_w, bg_h = background.size offset",
"+= featureVectors[row][col] colMean /= len(featureVectors) means.append(colMean) for col in range(2, len(featureVectors[0])): for row",
"of the image! # nat5 = [77,55,84,17] # circleInt = 24 # outName",
"howManyPerRow)] dests = [] for i in range(len(subOuts)): dest = brodatz + \"cropRow\"",
"weighting also takes place here. # The mean can be subtracted if specified",
"list] newList = [] for row in list: newRow = [] for item",
"intermediate results before clustering occurs def printFeatureImages(featureImages, naming, printlocation): i =0 for image",
"mRange = [] nRange = [] mRange.append(0) mRange.append(W-1) nRange.append(0) nRange.append(W-1) initm = int(round(row",
"Normalize to intensity values imageToPrint = cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation +",
"+ 2] for x in xrange(0, len(listOfRowOutputs), 2)] dests = [] for i",
"= Image.open(brodatz + \"D\" + str(circleInt) + \".png\") output = ImageOps.fit(im, mask.size, centering=(0.5,",
"img_h) / 2) background.paste(output, offset, img) background.save(brodatz + outName, format=\"png\") deleteCroppedImages() def createTexturePair(pair,",
"+ \" \") f.write(\"\\n\") f.close() # If we want to read in some",
"len(labels[0])): outputIntensity = (255/k)*labels[row][col] labels[row][col] = outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape)) else: pyplt.imsave(outdir, labels.reshape(img.shape)) #",
"brodatz + \"outcrop2.png\"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1) deleteCroppedImages() #-------------------------------------------------------------------------- # Create test images #--------------------------------------------------------------------------",
"also takes place here. # The mean can be subtracted if specified by",
"range(0, len(featureVectors)): featureVectors[row][col] -= means[col] copy = vq.whiten(featureVectors) if (setMeanToZero): for row in",
"row in range(height): for col in range(width): featureVector = [] featureVector.append(row) featureVector.append(col) for",
"[] for col in range(0, len(featureVectors[0])): colMean = 0 for row in range(0,",
"= open(outDir, 'w') for vector in featureVectors: for item in vector: f.write(str(item) +",
"user can also choose to make the output grey def printClassifiedImage(labels, k, img,",
"looking to save our feature vectors for later def printFeatureVectors(outDir, featureVectors): f =",
"labels # To clean up old filter and feature images if the user",
"\" % n) return float_n #-------------------------------------------------------------------------- # All of the functions below were",
"createGrid(listOfBrodatzInts, outName, howManyPerRow): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz",
"labels[row][col] = outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape)) else: pyplt.imsave(outdir, labels.reshape(img.shape)) # Call the k means",
"1)) nRange[1] -= diff windowHeight = mRange[1] - mRange[0] windowWidth = nRange[1] -",
"\".png\", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + \"Nat5crop.png\", 0) size = (128, 128) mask =",
"return labels # To clean up old filter and feature images if the",
"for thisImage in pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images, axis=axisType)) outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE)",
"this optional if we want!) deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts, circleInt, outName): listOfRowOutputs = []",
"crop = image.crop(box) crop.save(outDir, \"PNG\") def deleteCroppedImages(): for filename in glob.glob(brodatz + \"*crop*\"):",
"convolution. def getRanges_for_window_with_adjust(row, col, height, width, W): mRange = [] nRange = []",
"by the implementation. def normalizeData(featureVectors, setMeanToZero, spatialWeight=1): means = [] for col in",
"clustering occurs def printFeatureImages(featureImages, naming, printlocation): i =0 for image in featureImages: #",
"createGridWithCircle(listOfBrodatzInts, circleInt, outName): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz",
"data before clustering occurs. # Whiten sets the variance to be 1 (unit",
"[listOfRowOutputs[x:x + 2] for x in xrange(0, len(listOfRowOutputs), 2)] dests = [] for",
"matplotlib.pyplot as pyplt import scipy.cluster.vq as vq import argparse import glob # We",
"already. # # We can use any of the 112 images from the",
"of the functions below were left here to demonstrate how I went about",
"printClassifiedImage(labels, k, img, outdir, greyOutput): if(greyOutput): labels = labels.reshape(img.shape) for row in range(0,",
"Used to normalize data before clustering occurs. # Whiten sets the variance to",
"pair in texturePairs: # outName = brodatz + \"pair\" + str(count) + \".png\"",
"to intensity values imageToPrint = cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation + \"\\\\\"",
"setMeanToZero, spatialWeight=1): means = [] for col in range(0, len(featureVectors[0])): colMean = 0",
"mind, # where I pair all of the textures to every other texture.",
"need be. brodatz = \"D:\\\\ImageProcessing\\\\project\\\\OriginalBrodatz\\\\\" concatOut = \"D:\\\\ImageProcessing\\\\project\\\\concat.png\" # This is the function",
"add in row and column data def constructFeatureVectors(featureImages, img): featureVectors = [] height,"
] |
[
"da: doc_lbs.append(0) else: doc_lbs.append(1) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len",
"type=int) parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size', type=int) parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq', type=int)",
"KIND, either express or implied. # See the License for the specific language",
"Unless required by applicable law or agreed to in writing, software # distributed",
"defaultdict from kmedoids import kMedoids from scipy.spatial.distance import pdist, squareform from sklearn.metrics import",
"doc_lbs, ctx return doc_mix, doc_lbs def __get_wiki_mixture(self, filename): doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb'))",
"# C: {cls:[datum_id, ...], ...} lbs = [0]*doc_len for idx in C[1]: lbs[idx]",
"else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs def __get_wiki_mixture(self, filename): doc_mix,",
"1 for i in clust]) def __clust_accuracy(self, true, pred): return max(accuracy_score(true, pred), accuracy_score(true,",
"if word is None: words.append('UNK') else: words.append(word) return ' '.join(words) def __to_labels(self, C,",
"accuracy_score from pairwise_classifier import * class MixtureReader: def __init__(self, data_dir, data_type, context): assert",
"= result else: acc, prd = result out_file.write('FILE ID: ' + str(filename) +",
"self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir'] else 'wiki', context=self.psc_clf.config['context_length'] if self.psc_clf.config['context']",
"out_file.close() avg_acc = np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc def evaluate_given(self, filenames,",
"is not None: # clear out file for new writing. out_file = open(self.out_file_path,",
"else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename, self.out_file_path is not None) result = self.evaluate_single(doc_mix, doc_lbs)",
"= result out_file.write('FILE ID: ' + str(filename) + '\\n') for prd_lb, true_lb, indices",
"== \"__main__\": import argparse parser = argparse.ArgumentParser() parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size', type=int)",
"self.data_type == 'nyt': return self.__get_nyt_mixture(filename) else: # == wiki return self.__get_wiki_mixture(filename) def __get_nyt_mixture(self,",
"'vocab_size': args.vocab_size, 'emb_size': args.emb_size, 'n_layer': args.n_layer, 'hid_size': args.hid_size, 'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate, 'n_epoch':",
"for idx in C[1]: lbs[idx] = 1 return lbs def __flip_clust(self, clust): return",
"return_pred: return acc, doc_prd return acc def evaluate_rand(self, k=100, verbose=True): accs = []",
"'+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n') accs.append(acc) if verbose: print('File {}: acc = {}'.format(filename, acc)) out_file.close() avg_acc",
"language governing permissions and # limitations under the License. # ============================================================================== import os",
"this file except in compliance with the License. # You may obtain a",
"x2, x2_len = batch([x2]) fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']:",
"np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs class PscKMedoids: def __init__(self, psc_clf,",
"from collections import defaultdict from kmedoids import kMedoids from scipy.spatial.distance import pdist, squareform",
"self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path is not None) else: doc_mix, doc_lbs",
"psc_clf, data_type): self.psc_clf = psc_clf self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir']",
"{self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] = self.ctx conf = self.psc_clf.sess.run(self.psc_clf.scores,",
"assert data_type in ['nyt', 'wiki'] self.data_dir = data_dir self.data_type = data_type self.context =",
"MixtureReader: def __init__(self, data_dir, data_type, context): assert data_type in ['nyt', 'wiki'] self.data_dir =",
"doc_mix = dill.load(open(self.data_dir+filename, 'rb')) doc_lbs = [] for sentcode in doc_mix: if sentcode",
"self.psc_clf.config['context'] else 0) self.out_file_path = psc_clf.config['out_file_path'] def __to_sentence(self, indices): words = [] for",
"ANY KIND, either express or implied. # See the License for the specific",
"filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False) if self.out_file_path is not None: # clear out",
"kMedoids from scipy.spatial.distance import pdist, squareform from sklearn.metrics import accuracy_score from pairwise_classifier import",
"data_type): self.psc_clf = psc_clf self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir'] else",
"print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc def evaluate_given(self, filenames, verbose=True): accs = []",
"parser.parse_args() config = {'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size, 'n_layer': args.n_layer, 'hid_size': args.hid_size,",
"in self.psc_clf.config['data_dir'] else 'wiki', context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0) self.out_file_path = psc_clf.config['out_file_path'] def",
"parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id',",
"config = {'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size, 'n_layer': args.n_layer, 'hid_size': args.hid_size, 'keep_prob':",
"batch([x2]) fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] = self.ctx",
"type=int) parser.add_argument('--train_size', type=int) parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove', type=bool)",
"x2): x1, x1_len = batch([x1]) x2, x2_len = batch([x2]) fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len,",
"args.keep_prob, 'learning_rate': args.learning_rate, 'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose, 'save_freq': args.save_freq, 'data_dir': args.data_dir,",
"{}'.format(filename, acc)) out_file.close() avg_acc = np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc if",
"parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size', type=int) parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove',",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"i in clust]) def __clust_accuracy(self, true, pred): return max(accuracy_score(true, pred), accuracy_score(true, self.__flip_clust(pred))) def",
"self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]])",
"= self.__clust_accuracy(doc_lbs, doc_prd) if return_pred: return acc, doc_prd return acc def evaluate_rand(self, k=100,",
"self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] = self.ctx conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return 1-conf[0] def",
"type=int) parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name', type=str)",
"parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context',",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"| '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n') accs.append(acc) if verbose: print('File {}: acc",
"parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context', type=bool) parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path',",
"in clust]) def __clust_accuracy(self, true, pred): return max(accuracy_score(true, pred), accuracy_score(true, self.__flip_clust(pred))) def __dist(self,",
"= ctx doc_mix_sq, _ = batch(doc_mix) doc_mix_sq = doc_mix_sq.T _, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)),",
"os import sys sys.path.insert(0, os.getcwd()) import time import random import shutil import dill",
"doc_lbs, doc_mix): out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n') accs.append(acc)",
"data_dir, data_type, context): assert data_type in ['nyt', 'wiki'] self.data_dir = data_dir self.data_type =",
"= batch([x1]) x2, x2_len = batch([x2]) fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0}",
"helpers import Indexer, batch, checkpoint_model from itertools import chain, product from collections import",
"OF ANY KIND, either express or implied. # See the License for the",
"doc_lbs, ctx return doc_mix, doc_lbs class PscKMedoids: def __init__(self, psc_clf, data_type): self.psc_clf =",
"accs = [] if self.out_file_path is not None: # clear out file for",
"'+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n') accs.append(acc) if verbose: print('File {}:",
"as np import tensorflow as tf from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper from",
"Copyright 2018 @<NAME>. All Rights Reserved. # # Licensed under the Apache License,",
"doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs class PscKMedoids: def",
"doc_lbs = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs) if self.out_file_path is None: acc =",
"if self.out_file_path is None: acc = result else: acc, prd = result out_file.write('FILE",
"ctx, self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename, self.out_file_path is not",
"acc)) out_file.close() avg_acc = np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc if __name__",
"__get_nyt_mixture(self, filename): da, db, doc_mix = dill.load(open(self.data_dir+filename, 'rb')) doc_lbs = [] for sentcode",
"np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs def",
"doc_len): # C: {cls:[datum_id, ...], ...} lbs = [0]*doc_len for idx in C[1]:",
"'n_layer': args.n_layer, 'hid_size': args.hid_size, 'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate, 'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose':",
"args.save_freq, 'data_dir': args.data_dir, 'info_path': args.info_path, 'init_with_glove': args.init_with_glove, 'save_dir': args.save_dir, 'save_name': args.save_name, 'restore_dir': args.restore_dir,",
"parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path', type=str) args = parser.parse_args() config = {'batch_size': args.batch_size, 'vocab_size': args.vocab_size,",
"args.restore_dir, 'restore_name': args.restore_name, 'load_from_saved': args.load_from_saved, 'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id, 'mutual_attention': args.mutual_attention,",
"_, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix)) acc = self.__clust_accuracy(doc_lbs, doc_prd)",
"'out_file_path': args.out_file_path} psc_clf = PairwiseSentenceClassifier(config) kmed = PscKMedoids(psc_clf, data_type='nyt') print('\\n') sample_files = os.listdir('nyt_sample/')",
"args.train_size, 'verbose': args.verbose, 'save_freq': args.save_freq, 'data_dir': args.data_dir, 'info_path': args.info_path, 'init_with_glove': args.init_with_glove, 'save_dir': args.save_dir,",
"__name__ == \"__main__\": import argparse parser = argparse.ArgumentParser() parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size',",
"ctx return doc_mix, doc_lbs class PscKMedoids: def __init__(self, psc_clf, data_type): self.psc_clf = psc_clf",
"# ============================================================================== import os import sys sys.path.insert(0, os.getcwd()) import time import random import",
"if 'nyt' in self.psc_clf.config['data_dir'] else 'wiki', context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0) self.out_file_path =",
"import numpy as np import tensorflow as tf from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell,",
"in da: doc_lbs.append(0) else: doc_lbs.append(1) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix))",
"return np.array([0 if i==1 else 1 for i in clust]) def __clust_accuracy(self, true,",
"def evaluate_rand(self, k=100, verbose=True): accs = [] filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False) if",
"args.context, 'context_length': args.context_length, 'out_file_path': args.out_file_path} psc_clf = PairwiseSentenceClassifier(config) kmed = PscKMedoids(psc_clf, data_type='nyt') print('\\n')",
"data_type self.context = context # int: 0 or context-length. def get_mixture(self, filename): if",
"All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the",
"avg_acc if __name__ == \"__main__\": import argparse parser = argparse.ArgumentParser() parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size',",
"parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name',",
"parser = argparse.ArgumentParser() parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size', type=int)",
"len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return",
"not None) result = self.evaluate_single(doc_mix, doc_lbs) if out_file_path is None: acc = result",
"type=int) parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate', type=float)",
"== 'nyt': return self.__get_nyt_mixture(filename) else: # == wiki return self.__get_wiki_mixture(filename) def __get_nyt_mixture(self, filename):",
"C, doc_len): # C: {cls:[datum_id, ...], ...} lbs = [0]*doc_len for idx in",
"None: words.append('UNK') else: words.append(word) return ' '.join(words) def __to_labels(self, C, doc_len): # C:",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"= {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] = self.ctx conf =",
"type=bool) parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context', type=bool) parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path', type=str) args =",
"parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context', type=bool) parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path', type=str) args = parser.parse_args()",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"sys sys.path.insert(0, os.getcwd()) import time import random import shutil import dill import numpy",
"self.evaluate_single(doc_mix, doc_lbs) if self.out_file_path is None: acc = result else: acc, prd =",
"from scipy.spatial.distance import pdist, squareform from sklearn.metrics import accuracy_score from pairwise_classifier import *",
"return self.__get_wiki_mixture(filename) def __get_nyt_mixture(self, filename): da, db, doc_mix = dill.load(open(self.data_dir+filename, 'rb')) doc_lbs =",
"self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs)",
"and # limitations under the License. # ============================================================================== import os import sys sys.path.insert(0,",
"out_file.write('\\n\\n') accs.append(acc) if verbose: print('File {}: acc = {}'.format(filename, acc)) out_file.close() avg_acc =",
"parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size', type=int) parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq',",
"filenames, verbose=True): accs = [] if self.out_file_path is not None: # clear out",
"parser.add_argument('--train_size', type=int) parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir',",
"doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb')) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix))",
"= data_type self.context = context # int: 0 or context-length. def get_mixture(self, filename):",
"= self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs) if self.out_file_path is None: acc = result",
"doc_mix_sq, _ = batch(doc_mix) doc_mix_sq = doc_mix_sq.T _, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd",
"under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"true, pred): return max(accuracy_score(true, pred), accuracy_score(true, self.__flip_clust(pred))) def __dist(self, x1, x2): x1, x1_len",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"ctx is not None: self.ctx = ctx doc_mix_sq, _ = batch(doc_mix) doc_mix_sq =",
"'session_id': args.session_id, 'mutual_attention': args.mutual_attention, 'context': args.context, 'context_length': args.context_length, 'out_file_path': args.out_file_path} psc_clf = PairwiseSentenceClassifier(config)",
"return 1-conf[0] def evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average', return_pred=True): if ctx is not",
"doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix)) acc = self.__clust_accuracy(doc_lbs, doc_prd) if",
"words.append('UNK') else: words.append(word) return ' '.join(words) def __to_labels(self, C, doc_len): # C: {cls:[datum_id,",
"args.vocab_size, 'emb_size': args.emb_size, 'n_layer': args.n_layer, 'hid_size': args.hid_size, 'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate, 'n_epoch': args.n_epoch,",
"doc_lbs, ctx, self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename) result =",
"return self.__get_nyt_mixture(filename) else: # == wiki return self.__get_wiki_mixture(filename) def __get_nyt_mixture(self, filename): da, db,",
"doc_mix, doc_lbs, ctx=None, method='average', return_pred=True): if ctx is not None: self.ctx = ctx",
"doc_lbs) if out_file_path is None: acc = result else: acc, prd = result",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"import random import shutil import dill import numpy as np import tensorflow as",
"self.psc_clf.indexer.get_object(index) if word is None: words.append('UNK') else: words.append(word) return ' '.join(words) def __to_labels(self,",
"shutil import dill import numpy as np import tensorflow as tf from tensorflow.contrib.rnn",
"parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context', type=bool) parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path', type=str) args = parser.parse_args() config =",
"under the License. # ============================================================================== import os import sys sys.path.insert(0, os.getcwd()) import time",
"in zip(prd, doc_lbs, doc_mix): out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\\n')",
"np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc if __name__ == \"__main__\": import argparse",
"in indices: word = self.psc_clf.indexer.get_object(index) if word is None: words.append('UNK') else: words.append(word) return",
"required by applicable law or agreed to in writing, software # distributed under",
"{}: acc = {}'.format(filename, acc)) out_file.close() avg_acc = np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc))",
"args.restore_name, 'load_from_saved': args.load_from_saved, 'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id, 'mutual_attention': args.mutual_attention, 'context': args.context,",
"applicable law or agreed to in writing, software # distributed under the License",
"= np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc if __name__ == \"__main__\": import",
"not None: # clear out file for new writing. out_file = open(self.out_file_path, 'w')",
"parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track',",
"if sentcode in da: doc_lbs.append(0) else: doc_lbs.append(1) if self.context: CTX_LEN = self.context doc_mix_flat",
"parser.add_argument('--context', type=bool) parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path', type=str) args = parser.parse_args() config = {'batch_size': args.batch_size,",
"doc_mix, doc_lbs class PscKMedoids: def __init__(self, psc_clf, data_type): self.psc_clf = psc_clf self.mix_reader =",
"class MixtureReader: def __init__(self, data_dir, data_type, context): assert data_type in ['nyt', 'wiki'] self.data_dir",
"= list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return",
"return doc_mix, doc_lbs class PscKMedoids: def __init__(self, psc_clf, data_type): self.psc_clf = psc_clf self.mix_reader",
"or agreed to in writing, software # distributed under the License is distributed",
"else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs) if self.out_file_path is None:",
"in doc_mix: if sentcode in da: doc_lbs.append(0) else: doc_lbs.append(1) if self.context: CTX_LEN =",
"LSTMCell, MultiRNNCell, DropoutWrapper from helpers import Indexer, batch, checkpoint_model from itertools import chain,",
"true_lb, indices in zip(prd, doc_lbs, doc_mix): out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+'",
"pdist, squareform from sklearn.metrics import accuracy_score from pairwise_classifier import * class MixtureReader: def",
"lbs = [0]*doc_len for idx in C[1]: lbs[idx] = 1 return lbs def",
"args = parser.parse_args() config = {'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size, 'n_layer': args.n_layer,",
"return acc, doc_prd return acc def evaluate_rand(self, k=100, verbose=True): accs = [] filenames",
"CONDITIONS OF ANY KIND, either express or implied. # See the License for",
"psc_clf self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir'] else 'wiki', context=self.psc_clf.config['context_length'] if",
"method='average', return_pred=True): if ctx is not None: self.ctx = ctx doc_mix_sq, _ =",
"__init__(self, psc_clf, data_type): self.psc_clf = psc_clf self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if 'nyt' in",
"self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] = self.ctx conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd)",
"= {}'.format(avg_acc)) return avg_acc def evaluate_given(self, filenames, verbose=True): accs = [] if self.out_file_path",
"in C[1]: lbs[idx] = 1 return lbs def __flip_clust(self, clust): return np.array([0 if",
"= self.ctx conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return 1-conf[0] def evaluate_single(self, doc_mix, doc_lbs, ctx=None,",
"np.array([0 if i==1 else 1 for i in clust]) def __clust_accuracy(self, true, pred):",
"= kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix)) acc = self.__clust_accuracy(doc_lbs, doc_prd) if return_pred:",
"'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose, 'save_freq': args.save_freq, 'data_dir': args.data_dir, 'info_path': args.info_path, 'init_with_glove':",
"under the Apache License, Version 2.0 (the \"License\"); # you may not use",
"= np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False) if self.out_file_path is not None: # clear out file",
"pred), accuracy_score(true, self.__flip_clust(pred))) def __dist(self, x1, x2): x1, x1_len = batch([x1]) x2, x2_len",
"| '+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n') accs.append(acc) if verbose: print('File {}: acc = {}'.format(filename, acc)) out_file.close()",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n') accs.append(acc) if verbose:",
"None) result = self.evaluate_single(doc_mix, doc_lbs) if out_file_path is None: acc = result else:",
"clear out file for new writing. out_file = open(self.out_file_path, 'w') for filename in",
"You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"None: acc = result else: acc, prd = result out_file.write('FILE ID: ' +",
"License. # You may obtain a copy of the License at # #",
"words = [] for index in indices: word = self.psc_clf.indexer.get_object(index) if word is",
"doc_lbs = self.mix_reader.get_mixture(filename, self.out_file_path is not None) result = self.evaluate_single(doc_mix, doc_lbs) if out_file_path",
"= [] if self.out_file_path is not None: # clear out file for new",
"parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size',",
"_ = batch(doc_mix) doc_mix_sq = doc_mix_sq.T _, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd =",
"is not None) result = self.evaluate_single(doc_mix, doc_lbs) if out_file_path is None: acc =",
"import Indexer, batch, checkpoint_model from itertools import chain, product from collections import defaultdict",
"return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs def __get_wiki_mixture(self, filename): doc_mix, doc_lbs =",
"result = self.evaluate_single(doc_mix, doc_lbs) if self.out_file_path is None: acc = result else: acc,",
"{}'.format(avg_acc)) return avg_acc if __name__ == \"__main__\": import argparse parser = argparse.ArgumentParser() parser.add_argument('--batch_size',",
"if __name__ == \"__main__\": import argparse parser = argparse.ArgumentParser() parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size', type=int)",
"* class MixtureReader: def __init__(self, data_dir, data_type, context): assert data_type in ['nyt', 'wiki']",
"get_mixture(self, filename): if self.data_type == 'nyt': return self.__get_nyt_mixture(filename) else: # == wiki return",
"filename in filenames: if self.mix_reader.context: doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix,",
"{}'.format(filename, acc)) out_file.close() avg_acc = np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc def",
"'restore_name': args.restore_name, 'load_from_saved': args.load_from_saved, 'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id, 'mutual_attention': args.mutual_attention, 'context':",
"compliance with the License. # You may obtain a copy of the License",
"= self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename)",
"dill import numpy as np import tensorflow as tf from tensorflow.contrib.rnn import LSTMCell,",
"return max(accuracy_score(true, pred), accuracy_score(true, self.__flip_clust(pred))) def __dist(self, x1, x2): x1, x1_len = batch([x1])",
"+ str(filename) + '\\n') for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix): out_file.write('TRUE",
"============================================================================== import os import sys sys.path.insert(0, os.getcwd()) import time import random import shutil",
"kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix)) acc = self.__clust_accuracy(doc_lbs, doc_prd) if return_pred: return",
"import dill import numpy as np import tensorflow as tf from tensorflow.contrib.rnn import",
"if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx =",
"limitations under the License. # ============================================================================== import os import sys sys.path.insert(0, os.getcwd()) import",
"if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs def __get_wiki_mixture(self,",
"def evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average', return_pred=True): if ctx is not None: self.ctx",
"tf from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper from helpers import Indexer, batch, checkpoint_model",
"indices in zip(prd, doc_lbs, doc_mix): out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' |",
"{}'.format(avg_acc)) return avg_acc def evaluate_given(self, filenames, verbose=True): accs = [] if self.out_file_path is",
"replace=False) if self.out_file_path is not None: # clear out file for new writing.",
"self.__get_wiki_mixture(filename) def __get_nyt_mixture(self, filename): da, db, doc_mix = dill.load(open(self.data_dir+filename, 'rb')) doc_lbs = []",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"import kMedoids from scipy.spatial.distance import pdist, squareform from sklearn.metrics import accuracy_score from pairwise_classifier",
"out_file_path is None: acc = result else: acc, prd = result out_file.write('FILE ID:",
"accuracy = {}'.format(avg_acc)) return avg_acc if __name__ == \"__main__\": import argparse parser =",
"'learning_rate': args.learning_rate, 'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose, 'save_freq': args.save_freq, 'data_dir': args.data_dir, 'info_path':",
"if ctx is not None: self.ctx = ctx doc_mix_sq, _ = batch(doc_mix) doc_mix_sq",
"evaluate_given(self, filenames, verbose=True): accs = [] if self.out_file_path is not None: # clear",
"evaluate_rand(self, k=100, verbose=True): accs = [] filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False) if self.out_file_path",
"= {}'.format(avg_acc)) return avg_acc if __name__ == \"__main__\": import argparse parser = argparse.ArgumentParser()",
"len(doc_mix)) acc = self.__clust_accuracy(doc_lbs, doc_prd) if return_pred: return acc, doc_prd return acc def",
"doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs def __get_wiki_mixture(self, filename):",
"else: words.append(word) return ' '.join(words) def __to_labels(self, C, doc_len): # C: {cls:[datum_id, ...],",
"else 0) self.out_file_path = psc_clf.config['out_file_path'] def __to_sentence(self, indices): words = [] for index",
"parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size', type=int) parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path',",
"lbs def __flip_clust(self, clust): return np.array([0 if i==1 else 1 for i in",
"not use this file except in compliance with the License. # You may",
"return lbs def __flip_clust(self, clust): return np.array([0 if i==1 else 1 for i",
"self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] = self.ctx conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return",
"filename): doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb')) if self.context: CTX_LEN = self.context doc_mix_flat =",
"doc_lbs, ctx=None, method='average', return_pred=True): if ctx is not None: self.ctx = ctx doc_mix_sq,",
"parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size', type=int) parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir',",
"License, Version 2.0 (the \"License\"); # you may not use this file except",
"'\\n') for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix): out_file.write('TRUE = '+str(true_lb)+' |",
"collections import defaultdict from kmedoids import kMedoids from scipy.spatial.distance import pdist, squareform from",
"parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name',",
"args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose, 'save_freq': args.save_freq, 'data_dir': args.data_dir, 'info_path': args.info_path, 'init_with_glove': args.init_with_glove,",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"def __init__(self, psc_clf, data_type): self.psc_clf = psc_clf self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if 'nyt'",
"doc_mix_sq.T _, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix)) acc = self.__clust_accuracy(doc_lbs,",
"acc = result else: acc, prd = result out_file.write('FILE ID: ' + str(filename)",
"0 or context-length. def get_mixture(self, filename): if self.data_type == 'nyt': return self.__get_nyt_mixture(filename) else:",
"type=int) parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir', type=str)",
"self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename, self.out_file_path",
"result = self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path is not None) else: doc_mix, doc_lbs =",
"'emb_size': args.emb_size, 'n_layer': args.n_layer, 'hid_size': args.hid_size, 'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate, 'n_epoch': args.n_epoch, 'train_size':",
"...} lbs = [0]*doc_len for idx in C[1]: lbs[idx] = 1 return lbs",
"verbose=True): accs = [] if self.out_file_path is not None: # clear out file",
"None: self.ctx = ctx doc_mix_sq, _ = batch(doc_mix) doc_mix_sq = doc_mix_sq.T _, doc_mix_clust",
"# you may not use this file except in compliance with the License.",
"2018 @<NAME>. All Rights Reserved. # # Licensed under the Apache License, Version",
"for index in indices: word = self.psc_clf.indexer.get_object(index) if word is None: words.append('UNK') else:",
"not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename, self.out_file_path is not None) result =",
"agreed to in writing, software # distributed under the License is distributed on",
"args.learning_rate, 'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose, 'save_freq': args.save_freq, 'data_dir': args.data_dir, 'info_path': args.info_path,",
"str(filename) + '\\n') for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix): out_file.write('TRUE =",
"parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved',",
"self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename) result",
"None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename, self.out_file_path is not None) result = self.evaluate_single(doc_mix,",
"sentcode in da: doc_lbs.append(0) else: doc_lbs.append(1) if self.context: CTX_LEN = self.context doc_mix_flat =",
"(the \"License\"); # you may not use this file except in compliance with",
"= [] filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False) if self.out_file_path is not None: #",
"chain, product from collections import defaultdict from kmedoids import kMedoids from scipy.spatial.distance import",
"== wiki return self.__get_wiki_mixture(filename) def __get_nyt_mixture(self, filename): da, db, doc_mix = dill.load(open(self.data_dir+filename, 'rb'))",
"= np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs",
"x1, x2): x1, x1_len = batch([x1]) x2, x2_len = batch([x2]) fd = {self.psc_clf.input_x1:x1,",
"args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size, 'n_layer': args.n_layer, 'hid_size': args.hid_size, 'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate,",
"= context # int: 0 or context-length. def get_mixture(self, filename): if self.data_type ==",
"sys.path.insert(0, os.getcwd()) import time import random import shutil import dill import numpy as",
"acc = self.__clust_accuracy(doc_lbs, doc_prd) if return_pred: return acc, doc_prd return acc def evaluate_rand(self,",
"ID: ' + str(filename) + '\\n') for prd_lb, true_lb, indices in zip(prd, doc_lbs,",
"parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context', type=bool) parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path', type=str) args",
"args.save_dir, 'save_name': args.save_name, 'restore_dir': args.restore_dir, 'restore_name': args.restore_name, 'load_from_saved': args.load_from_saved, 'track_dir': args.track_dir, 'new_track': args.new_track,",
"if out_file_path is None: acc = result else: acc, prd = result out_file.write('FILE",
"# Unless required by applicable law or agreed to in writing, software #",
"else: doc_lbs.append(1) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat)",
"doc_lbs.append(0) else: doc_lbs.append(1) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len =",
"by applicable law or agreed to in writing, software # distributed under the",
"= '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n') accs.append(acc) if verbose: print('File {}: acc = {}'.format(filename,",
"evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average', return_pred=True): if ctx is not None: self.ctx =",
"return ' '.join(words) def __to_labels(self, C, doc_len): # C: {cls:[datum_id, ...], ...} lbs",
"else: acc, prd = result out_file.write('FILE ID: ' + str(filename) + '\\n') for",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"for i in clust]) def __clust_accuracy(self, true, pred): return max(accuracy_score(true, pred), accuracy_score(true, self.__flip_clust(pred)))",
"type=int) parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch', type=int)",
"@<NAME>. All Rights Reserved. # # Licensed under the Apache License, Version 2.0",
"file for new writing. out_file = open(self.out_file_path, 'w') for filename in filenames: if",
"self.__flip_clust(pred))) def __dist(self, x1, x2): x1, x1_len = batch([x1]) x2, x2_len = batch([x2])",
"np import tensorflow as tf from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper from helpers",
"lbs[idx] = 1 return lbs def __flip_clust(self, clust): return np.array([0 if i==1 else",
"return doc_mix, doc_lbs def __get_wiki_mixture(self, filename): doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb')) if self.context:",
"parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size', type=int) parser.add_argument('--verbose',",
"type=str) args = parser.parse_args() config = {'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size, 'n_layer':",
"'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id, 'mutual_attention': args.mutual_attention, 'context': args.context, 'context_length': args.context_length, 'out_file_path':",
"permissions and # limitations under the License. # ============================================================================== import os import sys",
"file except in compliance with the License. # You may obtain a copy",
"type=str) parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention', type=bool)",
"[] for sentcode in doc_mix: if sentcode in da: doc_lbs.append(0) else: doc_lbs.append(1) if",
"self.context = context # int: 0 or context-length. def get_mixture(self, filename): if self.data_type",
"type=str) parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name', type=str)",
"'w') for filename in filenames: if self.mix_reader.context: doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename) result",
"tensorflow as tf from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper from helpers import Indexer,",
"# Copyright 2018 @<NAME>. All Rights Reserved. # # Licensed under the Apache",
"prd = result out_file.write('FILE ID: ' + str(filename) + '\\n') for prd_lb, true_lb,",
"License. # ============================================================================== import os import sys sys.path.insert(0, os.getcwd()) import time import random",
"= self.evaluate_single(doc_mix, doc_lbs) if self.out_file_path is None: acc = result else: acc, prd",
"License for the specific language governing permissions and # limitations under the License.",
"data_type in ['nyt', 'wiki'] self.data_dir = data_dir self.data_type = data_type self.context = context",
"indices: word = self.psc_clf.indexer.get_object(index) if word is None: words.append('UNK') else: words.append(word) return '",
"to in writing, software # distributed under the License is distributed on an",
"= self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename,",
"def __flip_clust(self, clust): return np.array([0 if i==1 else 1 for i in clust])",
"implied. # See the License for the specific language governing permissions and #",
"type=bool) parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir', type=str)",
"\"License\"); # you may not use this file except in compliance with the",
"i==1 else 1 for i in clust]) def __clust_accuracy(self, true, pred): return max(accuracy_score(true,",
"Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"'verbose': args.verbose, 'save_freq': args.save_freq, 'data_dir': args.data_dir, 'info_path': args.info_path, 'init_with_glove': args.init_with_glove, 'save_dir': args.save_dir, 'save_name':",
"if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs class PscKMedoids:",
"not None: self.ctx = ctx doc_mix_sq, _ = batch(doc_mix) doc_mix_sq = doc_mix_sq.T _,",
"+ '\\n') for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix): out_file.write('TRUE = '+str(true_lb)+'",
"= self.__to_labels(doc_mix_clust, len(doc_mix)) acc = self.__clust_accuracy(doc_lbs, doc_prd) if return_pred: return acc, doc_prd return",
"args.verbose, 'save_freq': args.save_freq, 'data_dir': args.data_dir, 'info_path': args.info_path, 'init_with_glove': args.init_with_glove, 'save_dir': args.save_dir, 'save_name': args.save_name,",
"type=float) parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size', type=int) parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir', type=str)",
"type=int) parser.add_argument('--out_file_path', type=str) args = parser.parse_args() config = {'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size':",
"self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] = self.ctx conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return 1-conf[0]",
"__clust_accuracy(self, true, pred): return max(accuracy_score(true, pred), accuracy_score(true, self.__flip_clust(pred))) def __dist(self, x1, x2): x1,",
"from itertools import chain, product from collections import defaultdict from kmedoids import kMedoids",
"out file for new writing. out_file = open(self.out_file_path, 'w') for filename in filenames:",
"= self.evaluate_single(doc_mix, doc_lbs) if out_file_path is None: acc = result else: acc, prd",
"print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc if __name__ == \"__main__\": import argparse parser",
"if verbose: print('File {}: acc = {}'.format(filename, acc)) out_file.close() avg_acc = np.mean(accs) print('\\nAverage",
"' + str(filename) + '\\n') for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix):",
"import shutil import dill import numpy as np import tensorflow as tf from",
"the License. # ============================================================================== import os import sys sys.path.insert(0, os.getcwd()) import time import",
"or implied. # See the License for the specific language governing permissions and",
"self.out_file_path = psc_clf.config['out_file_path'] def __to_sentence(self, indices): words = [] for index in indices:",
"= parser.parse_args() config = {'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size, 'n_layer': args.n_layer, 'hid_size':",
"result out_file.write('FILE ID: ' + str(filename) + '\\n') for prd_lb, true_lb, indices in",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"is None: acc = result else: acc, prd = result out_file.write('FILE ID: '",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename, self.out_file_path is not None)",
"'context': args.context, 'context_length': args.context_length, 'out_file_path': args.out_file_path} psc_clf = PairwiseSentenceClassifier(config) kmed = PscKMedoids(psc_clf, data_type='nyt')",
"open(self.out_file_path, 'w') for filename in filenames: if self.mix_reader.context: doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename)",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"import os import sys sys.path.insert(0, os.getcwd()) import time import random import shutil import",
"'restore_dir': args.restore_dir, 'restore_name': args.restore_name, 'load_from_saved': args.load_from_saved, 'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id, 'mutual_attention':",
"in writing, software # distributed under the License is distributed on an \"AS",
"result = self.evaluate_single(doc_mix, doc_lbs) if out_file_path is None: acc = result else: acc,",
"doc_prd) if return_pred: return acc, doc_prd return acc def evaluate_rand(self, k=100, verbose=True): accs",
"type=str) parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id', type=str)",
"for new writing. out_file = open(self.out_file_path, 'w') for filename in filenames: if self.mix_reader.context:",
"1-conf[0] def evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average', return_pred=True): if ctx is not None:",
"class PscKMedoids: def __init__(self, psc_clf, data_type): self.psc_clf = psc_clf self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt'",
"'hid_size': args.hid_size, 'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate, 'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose, 'save_freq':",
"new writing. out_file = open(self.out_file_path, 'w') for filename in filenames: if self.mix_reader.context: doc_mix,",
"type=bool) parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path', type=str) args = parser.parse_args() config = {'batch_size': args.batch_size, 'vocab_size':",
"# See the License for the specific language governing permissions and # limitations",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"if self.psc_clf.config['context'] else 0) self.out_file_path = psc_clf.config['out_file_path'] def __to_sentence(self, indices): words = []",
"time import random import shutil import dill import numpy as np import tensorflow",
"= [] for index in indices: word = self.psc_clf.indexer.get_object(index) if word is None:",
"'info_path': args.info_path, 'init_with_glove': args.init_with_glove, 'save_dir': args.save_dir, 'save_name': args.save_name, 'restore_dir': args.restore_dir, 'restore_name': args.restore_name, 'load_from_saved':",
"verbose=True): accs = [] filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False) if self.out_file_path is not",
"if return_pred: return acc, doc_prd return acc def evaluate_rand(self, k=100, verbose=True): accs =",
"specific language governing permissions and # limitations under the License. # ============================================================================== import",
"' '.join(words) def __to_labels(self, C, doc_len): # C: {cls:[datum_id, ...], ...} lbs =",
"fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] = self.ctx conf",
"word = self.psc_clf.indexer.get_object(index) if word is None: words.append('UNK') else: words.append(word) return ' '.join(words)",
"filename): da, db, doc_mix = dill.load(open(self.data_dir+filename, 'rb')) doc_lbs = [] for sentcode in",
"type=str) parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved', type=bool)",
"doc_mix, doc_lbs, ctx return doc_mix, doc_lbs def __get_wiki_mixture(self, filename): doc_mix, doc_lbs = dill.load(open(self.data_dir+filename,",
"__to_sentence(self, indices): words = [] for index in indices: word = self.psc_clf.indexer.get_object(index) if",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"import defaultdict from kmedoids import kMedoids from scipy.spatial.distance import pdist, squareform from sklearn.metrics",
"you may not use this file except in compliance with the License. #",
"np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs def __get_wiki_mixture(self, filename): doc_mix, doc_lbs",
"self.out_file_path is not None) result = self.evaluate_single(doc_mix, doc_lbs) if out_file_path is None: acc",
"self.mix_reader.get_mixture(filename, self.out_file_path is not None) result = self.evaluate_single(doc_mix, doc_lbs) if out_file_path is None:",
"doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)])",
"argparse parser = argparse.ArgumentParser() parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size',",
"fd[self.psc_clf.input_ctx] = self.ctx conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return 1-conf[0] def evaluate_single(self, doc_mix, doc_lbs,",
"args.session_id, 'mutual_attention': args.mutual_attention, 'context': args.context, 'context_length': args.context_length, 'out_file_path': args.out_file_path} psc_clf = PairwiseSentenceClassifier(config) kmed",
"# clear out file for new writing. out_file = open(self.out_file_path, 'w') for filename",
"'wiki', context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0) self.out_file_path = psc_clf.config['out_file_path'] def __to_sentence(self, indices): words",
"is None: words.append('UNK') else: words.append(word) return ' '.join(words) def __to_labels(self, C, doc_len): #",
"use this file except in compliance with the License. # You may obtain",
"size=k, replace=False) if self.out_file_path is not None: # clear out file for new",
"args.hid_size, 'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate, 'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose, 'save_freq': args.save_freq,",
"acc, prd = result out_file.write('FILE ID: ' + str(filename) + '\\n') for prd_lb,",
"ctx=None, method='average', return_pred=True): if ctx is not None: self.ctx = ctx doc_mix_sq, _",
"filename): if self.data_type == 'nyt': return self.__get_nyt_mixture(filename) else: # == wiki return self.__get_wiki_mixture(filename)",
"doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path is not",
"writing. out_file = open(self.out_file_path, 'w') for filename in filenames: if self.mix_reader.context: doc_mix, doc_lbs,",
"def __to_labels(self, C, doc_len): # C: {cls:[datum_id, ...], ...} lbs = [0]*doc_len for",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"else: # == wiki return self.__get_wiki_mixture(filename) def __get_nyt_mixture(self, filename): da, db, doc_mix =",
"= {}'.format(filename, acc)) out_file.close() avg_acc = np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc",
"if i==1 else 1 for i in clust]) def __clust_accuracy(self, true, pred): return",
"C[1]: lbs[idx] = 1 return lbs def __flip_clust(self, clust): return np.array([0 if i==1",
"# == wiki return self.__get_wiki_mixture(filename) def __get_nyt_mixture(self, filename): da, db, doc_mix = dill.load(open(self.data_dir+filename,",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"accuracy_score(true, self.__flip_clust(pred))) def __dist(self, x1, x2): x1, x1_len = batch([x1]) x2, x2_len =",
"type=int) parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size', type=int)",
"indices): words = [] for index in indices: word = self.psc_clf.indexer.get_object(index) if word",
"from sklearn.metrics import accuracy_score from pairwise_classifier import * class MixtureReader: def __init__(self, data_dir,",
"self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return 1-conf[0] def evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average', return_pred=True): if ctx",
"args.init_with_glove, 'save_dir': args.save_dir, 'save_name': args.save_name, 'restore_dir': args.restore_dir, 'restore_name': args.restore_name, 'load_from_saved': args.load_from_saved, 'track_dir': args.track_dir,",
"dill.load(open(self.data_dir+filename, 'rb')) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat)",
"for the specific language governing permissions and # limitations under the License. #",
"# limitations under the License. # ============================================================================== import os import sys sys.path.insert(0, os.getcwd())",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"def __init__(self, data_dir, data_type, context): assert data_type in ['nyt', 'wiki'] self.data_dir = data_dir",
"= [] for sentcode in doc_mix: if sentcode in da: doc_lbs.append(0) else: doc_lbs.append(1)",
"return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs class PscKMedoids: def __init__(self, psc_clf, data_type):",
"= '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n') accs.append(acc) if verbose: print('File",
"in ['nyt', 'wiki'] self.data_dir = data_dir self.data_type = data_type self.context = context #",
"doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix)) acc = self.__clust_accuracy(doc_lbs, doc_prd) if return_pred: return acc, doc_prd",
"self.evaluate_single(doc_mix, doc_lbs) if out_file_path is None: acc = result else: acc, prd =",
"from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper from helpers import Indexer, batch, checkpoint_model from",
"# # Unless required by applicable law or agreed to in writing, software",
"'rb')) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx",
"conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return 1-conf[0] def evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average', return_pred=True):",
"express or implied. # See the License for the specific language governing permissions",
"data_type, context): assert data_type in ['nyt', 'wiki'] self.data_dir = data_dir self.data_type = data_type",
"= MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir'] else 'wiki', context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else",
"k=100, verbose=True): accs = [] filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False) if self.out_file_path is",
"int: 0 or context-length. def get_mixture(self, filename): if self.data_type == 'nyt': return self.__get_nyt_mixture(filename)",
"self.__get_nyt_mixture(filename) else: # == wiki return self.__get_wiki_mixture(filename) def __get_nyt_mixture(self, filename): da, db, doc_mix",
"def __clust_accuracy(self, true, pred): return max(accuracy_score(true, pred), accuracy_score(true, self.__flip_clust(pred))) def __dist(self, x1, x2):",
"feed_dict=fd) return 1-conf[0] def evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average', return_pred=True): if ctx is",
"result else: acc, prd = result out_file.write('FILE ID: ' + str(filename) + '\\n')",
"print('File {}: acc = {}'.format(filename, acc)) out_file.close() avg_acc = np.mean(accs) print('\\nAverage accuracy =",
"self.data_type = data_type self.context = context # int: 0 or context-length. def get_mixture(self,",
"parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir',",
"either express or implied. # See the License for the specific language governing",
"= psc_clf self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir'] else 'wiki', context=self.psc_clf.config['context_length']",
"PscKMedoids: def __init__(self, psc_clf, data_type): self.psc_clf = psc_clf self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if",
"ctx, self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix,",
"pairwise_classifier import * class MixtureReader: def __init__(self, data_dir, data_type, context): assert data_type in",
"sklearn.metrics import accuracy_score from pairwise_classifier import * class MixtureReader: def __init__(self, data_dir, data_type,",
"type=str) parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context', type=bool) parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path', type=str)",
"self.psc_clf.config['data_dir'] else 'wiki', context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0) self.out_file_path = psc_clf.config['out_file_path'] def __to_sentence(self,",
"doc_mix, doc_lbs, ctx return doc_mix, doc_lbs class PscKMedoids: def __init__(self, psc_clf, data_type): self.psc_clf",
"args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id, 'mutual_attention': args.mutual_attention, 'context': args.context, 'context_length': args.context_length, 'out_file_path': args.out_file_path}",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs) if self.out_file_path",
"is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename, self.out_file_path is not None) result",
"= 1 return lbs def __flip_clust(self, clust): return np.array([0 if i==1 else 1",
"da, db, doc_mix = dill.load(open(self.data_dir+filename, 'rb')) doc_lbs = [] for sentcode in doc_mix:",
"__dist(self, x1, x2): x1, x1_len = batch([x1]) x2, x2_len = batch([x2]) fd =",
"if self.data_type == 'nyt': return self.__get_nyt_mixture(filename) else: # == wiki return self.__get_wiki_mixture(filename) def",
"import * class MixtureReader: def __init__(self, data_dir, data_type, context): assert data_type in ['nyt',",
"'init_with_glove': args.init_with_glove, 'save_dir': args.save_dir, 'save_name': args.save_name, 'restore_dir': args.restore_dir, 'restore_name': args.restore_name, 'load_from_saved': args.load_from_saved, 'track_dir':",
"= psc_clf.config['out_file_path'] def __to_sentence(self, indices): words = [] for index in indices: word",
"the License. # You may obtain a copy of the License at #",
"import chain, product from collections import defaultdict from kmedoids import kMedoids from scipy.spatial.distance",
"doc_prd return acc def evaluate_rand(self, k=100, verbose=True): accs = [] filenames = np.random.choice(self.psc_clf.FILENAMES,",
"= self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path is not None) else: doc_mix,",
"import LSTMCell, MultiRNNCell, DropoutWrapper from helpers import Indexer, batch, checkpoint_model from itertools import",
"= {'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size, 'n_layer': args.n_layer, 'hid_size': args.hid_size, 'keep_prob': args.keep_prob,",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"else 1 for i in clust]) def __clust_accuracy(self, true, pred): return max(accuracy_score(true, pred),",
"args.data_dir, 'info_path': args.info_path, 'init_with_glove': args.init_with_glove, 'save_dir': args.save_dir, 'save_name': args.save_name, 'restore_dir': args.restore_dir, 'restore_name': args.restore_name,",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir'] else 'wiki', context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0)",
"self.ctx = ctx doc_mix_sq, _ = batch(doc_mix) doc_mix_sq = doc_mix_sq.T _, doc_mix_clust =",
"doc_mix_sq = doc_mix_sq.T _, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix)) acc",
"args.new_track, 'session_id': args.session_id, 'mutual_attention': args.mutual_attention, 'context': args.context, 'context_length': args.context_length, 'out_file_path': args.out_file_path} psc_clf =",
"{'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size, 'n_layer': args.n_layer, 'hid_size': args.hid_size, 'keep_prob': args.keep_prob, 'learning_rate':",
"import argparse parser = argparse.ArgumentParser() parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer', type=int)",
"parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir',",
"= data_dir self.data_type = data_type self.context = context # int: 0 or context-length.",
"batch(doc_mix) doc_mix_sq = doc_mix_sq.T _, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix))",
"__init__(self, data_dir, data_type, context): assert data_type in ['nyt', 'wiki'] self.data_dir = data_dir self.data_type",
"...], ...} lbs = [0]*doc_len for idx in C[1]: lbs[idx] = 1 return",
"clust]) def __clust_accuracy(self, true, pred): return max(accuracy_score(true, pred), accuracy_score(true, self.__flip_clust(pred))) def __dist(self, x1,",
"return avg_acc def evaluate_given(self, filenames, verbose=True): accs = [] if self.out_file_path is not",
"self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else",
"args.n_layer, 'hid_size': args.hid_size, 'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate, 'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose,",
"args.save_name, 'restore_dir': args.restore_dir, 'restore_name': args.restore_name, 'load_from_saved': args.load_from_saved, 'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id,",
"type=bool) parser.add_argument('--context', type=bool) parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path', type=str) args = parser.parse_args() config = {'batch_size':",
"doc_mix, doc_lbs = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs) if self.out_file_path is None: acc",
"= doc_mix_sq.T _, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix)) acc =",
"pred): return max(accuracy_score(true, pred), accuracy_score(true, self.__flip_clust(pred))) def __dist(self, x1, x2): x1, x1_len =",
"'save_dir': args.save_dir, 'save_name': args.save_name, 'restore_dir': args.restore_dir, 'restore_name': args.restore_name, 'load_from_saved': args.load_from_saved, 'track_dir': args.track_dir, 'new_track':",
"with the License. # You may obtain a copy of the License at",
"parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context', type=bool) parser.add_argument('--context_length',",
"x1_len = batch([x1]) x2, x2_len = batch([x2]) fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len,",
"[] for index in indices: word = self.psc_clf.indexer.get_object(index) if word is None: words.append('UNK')",
"def get_mixture(self, filename): if self.data_type == 'nyt': return self.__get_nyt_mixture(filename) else: # == wiki",
"parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch',",
"clust): return np.array([0 if i==1 else 1 for i in clust]) def __clust_accuracy(self,",
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix): out_file.write('TRUE = '+str(true_lb)+' | '+'PRED",
"doc_lbs, ctx, self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename, self.out_file_path is",
"as tf from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper from helpers import Indexer, batch,",
"scipy.spatial.distance import pdist, squareform from sklearn.metrics import accuracy_score from pairwise_classifier import * class",
"doc_lbs, ctx = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path is not None)",
"'load_from_saved': args.load_from_saved, 'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id, 'mutual_attention': args.mutual_attention, 'context': args.context, 'context_length':",
"accs.append(acc) if verbose: print('File {}: acc = {}'.format(filename, acc)) out_file.close() avg_acc = np.mean(accs)",
"'.join(words) def __to_labels(self, C, doc_len): # C: {cls:[datum_id, ...], ...} lbs = [0]*doc_len",
"args.out_file_path} psc_clf = PairwiseSentenceClassifier(config) kmed = PscKMedoids(psc_clf, data_type='nyt') print('\\n') sample_files = os.listdir('nyt_sample/') kmed.evaluate_given(sample_files)",
"tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper from helpers import Indexer, batch, checkpoint_model from itertools",
"verbose: print('File {}: acc = {}'.format(filename, acc)) out_file.close() avg_acc = np.mean(accs) print('\\nAverage accuracy",
"law or agreed to in writing, software # distributed under the License is",
"the License for the specific language governing permissions and # limitations under the",
"kmedoids import kMedoids from scipy.spatial.distance import pdist, squareform from sklearn.metrics import accuracy_score from",
"doc_lbs.append(1) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx",
"list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix,",
"out_file.close() avg_acc = np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc if __name__ ==",
"[0]*doc_len for idx in C[1]: lbs[idx] = 1 return lbs def __flip_clust(self, clust):",
"governing permissions and # limitations under the License. # ============================================================================== import os import",
"CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]]) if",
"avg_acc def evaluate_given(self, filenames, verbose=True): accs = [] if self.out_file_path is not None:",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"ctx doc_mix_sq, _ = batch(doc_mix) doc_mix_sq = doc_mix_sq.T _, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2)",
"\"__main__\": import argparse parser = argparse.ArgumentParser() parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer',",
"Indexer, batch, checkpoint_model from itertools import chain, product from collections import defaultdict from",
"import time import random import shutil import dill import numpy as np import",
"random import shutil import dill import numpy as np import tensorflow as tf",
"self.out_file_path is not None: # clear out file for new writing. out_file =",
"self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs) if self.out_file_path is None: acc = result else:",
"self.out_file_path is None: acc = result else: acc, prd = result out_file.write('FILE ID:",
"args.emb_size, 'n_layer': args.n_layer, 'hid_size': args.hid_size, 'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate, 'n_epoch': args.n_epoch, 'train_size': args.train_size,",
"import accuracy_score from pairwise_classifier import * class MixtureReader: def __init__(self, data_dir, data_type, context):",
"for filename in filenames: if self.mix_reader.context: doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename) result =",
"[] if self.out_file_path is not None: # clear out file for new writing.",
"def __get_wiki_mixture(self, filename): doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb')) if self.context: CTX_LEN = self.context",
"np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False) if self.out_file_path is not None: # clear out file for",
"if self.out_file_path is not None: # clear out file for new writing. out_file",
"Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"acc = {}'.format(filename, acc)) out_file.close() avg_acc = np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return",
"in compliance with the License. # You may obtain a copy of the",
"type=str) parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context', type=bool) parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path', type=str) args = parser.parse_args() config",
"else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs class PscKMedoids: def __init__(self,",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"def __dist(self, x1, x2): x1, x1_len = batch([x1]) x2, x2_len = batch([x2]) fd",
"context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0) self.out_file_path = psc_clf.config['out_file_path'] def __to_sentence(self, indices): words =",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"self.data_dir = data_dir self.data_type = data_type self.context = context # int: 0 or",
"context # int: 0 or context-length. def get_mixture(self, filename): if self.data_type == 'nyt':",
"import sys sys.path.insert(0, os.getcwd()) import time import random import shutil import dill import",
"def __get_nyt_mixture(self, filename): da, db, doc_mix = dill.load(open(self.data_dir+filename, 'rb')) doc_lbs = [] for",
"parser.add_argument('--out_file_path', type=str) args = parser.parse_args() config = {'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size,",
"doc_mix: if sentcode in da: doc_lbs.append(0) else: doc_lbs.append(1) if self.context: CTX_LEN = self.context",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"# int: 0 or context-length. def get_mixture(self, filename): if self.data_type == 'nyt': return",
"ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix,",
"os.getcwd()) import time import random import shutil import dill import numpy as np",
"C: {cls:[datum_id, ...], ...} lbs = [0]*doc_len for idx in C[1]: lbs[idx] =",
"= batch(doc_mix) doc_mix_sq = doc_mix_sq.T _, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd = self.__to_labels(doc_mix_clust,",
"from helpers import Indexer, batch, checkpoint_model from itertools import chain, product from collections",
"'context_length': args.context_length, 'out_file_path': args.out_file_path} psc_clf = PairwiseSentenceClassifier(config) kmed = PscKMedoids(psc_clf, data_type='nyt') print('\\n') sample_files",
"See the License for the specific language governing permissions and # limitations under",
"ctx return doc_mix, doc_lbs def __get_wiki_mixture(self, filename): doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb')) if",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"= self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return 1-conf[0] def evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average', return_pred=True): if",
"None: # clear out file for new writing. out_file = open(self.out_file_path, 'w') for",
"args.load_from_saved, 'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id, 'mutual_attention': args.mutual_attention, 'context': args.context, 'context_length': args.context_length,",
"'+str(prd_lb)+' | '+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n') accs.append(acc) if verbose: print('File {}: acc = {}'.format(filename, acc))",
"= self.psc_clf.indexer.get_object(index) if word is None: words.append('UNK') else: words.append(word) return ' '.join(words) def",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"is not None: self.ctx = ctx doc_mix_sq, _ = batch(doc_mix) doc_mix_sq = doc_mix_sq.T",
"[] filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False) if self.out_file_path is not None: # clear",
"batch([x1]) x2, x2_len = batch([x2]) fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0} if",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"avg_acc = np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc def evaluate_given(self, filenames, verbose=True):",
"acc, doc_prd return acc def evaluate_rand(self, k=100, verbose=True): accs = [] filenames =",
"return avg_acc if __name__ == \"__main__\": import argparse parser = argparse.ArgumentParser() parser.add_argument('--batch_size', type=int)",
"doc_lbs = [] for sentcode in doc_mix: if sentcode in da: doc_lbs.append(0) else:",
"context-length. def get_mixture(self, filename): if self.data_type == 'nyt': return self.__get_nyt_mixture(filename) else: # ==",
"= len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx",
"np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs class",
"batch, checkpoint_model from itertools import chain, product from collections import defaultdict from kmedoids",
"doc_mix): out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n') accs.append(acc) if",
"'nyt': return self.__get_nyt_mixture(filename) else: # == wiki return self.__get_wiki_mixture(filename) def __get_nyt_mixture(self, filename): da,",
"DropoutWrapper from helpers import Indexer, batch, checkpoint_model from itertools import chain, product from",
"parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate',",
"doc_lbs) if self.out_file_path is None: acc = result else: acc, prd = result",
"args.context_length, 'out_file_path': args.out_file_path} psc_clf = PairwiseSentenceClassifier(config) kmed = PscKMedoids(psc_clf, data_type='nyt') print('\\n') sample_files =",
"= self.mix_reader.get_mixture(filename, self.out_file_path is not None) result = self.evaluate_single(doc_mix, doc_lbs) if out_file_path is",
"'save_freq': args.save_freq, 'data_dir': args.data_dir, 'info_path': args.info_path, 'init_with_glove': args.init_with_glove, 'save_dir': args.save_dir, 'save_name': args.save_name, 'restore_dir':",
"= open(self.out_file_path, 'w') for filename in filenames: if self.mix_reader.context: doc_mix, doc_lbs, ctx =",
"squareform from sklearn.metrics import accuracy_score from pairwise_classifier import * class MixtureReader: def __init__(self,",
"__to_labels(self, C, doc_len): # C: {cls:[datum_id, ...], ...} lbs = [0]*doc_len for idx",
"= dill.load(open(self.data_dir+filename, 'rb')) doc_lbs = [] for sentcode in doc_mix: if sentcode in",
"doc_lbs def __get_wiki_mixture(self, filename): doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb')) if self.context: CTX_LEN =",
"def __to_sentence(self, indices): words = [] for index in indices: word = self.psc_clf.indexer.get_object(index)",
"word is None: words.append('UNK') else: words.append(word) return ' '.join(words) def __to_labels(self, C, doc_len):",
"'+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n') accs.append(acc) if verbose: print('File {}: acc =",
"MultiRNNCell, DropoutWrapper from helpers import Indexer, batch, checkpoint_model from itertools import chain, product",
"type=bool) parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context', type=bool) parser.add_argument('--context_length', type=int)",
"type=int) parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir', type=str)",
"type=str) parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track', type=bool)",
"self.psc_clf = psc_clf self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir'] else 'wiki',",
"'wiki'] self.data_dir = data_dir self.data_type = data_type self.context = context # int: 0",
"wiki return self.__get_wiki_mixture(filename) def __get_nyt_mixture(self, filename): da, db, doc_mix = dill.load(open(self.data_dir+filename, 'rb')) doc_lbs",
"__flip_clust(self, clust): return np.array([0 if i==1 else 1 for i in clust]) def",
"accuracy = {}'.format(avg_acc)) return avg_acc def evaluate_given(self, filenames, verbose=True): accs = [] if",
"self.__clust_accuracy(doc_lbs, doc_prd) if return_pred: return acc, doc_prd return acc def evaluate_rand(self, k=100, verbose=True):",
"is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs) if",
"prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix): out_file.write('TRUE = '+str(true_lb)+' | '+'PRED =",
"args.mutual_attention, 'context': args.context, 'context_length': args.context_length, 'out_file_path': args.out_file_path} psc_clf = PairwiseSentenceClassifier(config) kmed = PscKMedoids(psc_clf,",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"sentcode in doc_mix: if sentcode in da: doc_lbs.append(0) else: doc_lbs.append(1) if self.context: CTX_LEN",
"doc_mix, doc_lbs = self.mix_reader.get_mixture(filename, self.out_file_path is not None) result = self.evaluate_single(doc_mix, doc_lbs) if",
"except in compliance with the License. # You may obtain a copy of",
"None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs) if self.out_file_path is",
"the specific language governing permissions and # limitations under the License. # ==============================================================================",
"zip(prd, doc_lbs, doc_mix): out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\\n') out_file.write('\\n\\n')",
"context): assert data_type in ['nyt', 'wiki'] self.data_dir = data_dir self.data_type = data_type self.context",
"self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] = self.ctx conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return 1-conf[0] def evaluate_single(self, doc_mix,",
"'rb')) doc_lbs = [] for sentcode in doc_mix: if sentcode in da: doc_lbs.append(0)",
"accs = [] filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False) if self.out_file_path is not None:",
"__get_wiki_mixture(self, filename): doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb')) if self.context: CTX_LEN = self.context doc_mix_flat",
"return_pred=True): if ctx is not None: self.ctx = ctx doc_mix_sq, _ = batch(doc_mix)",
"out_file.write('FILE ID: ' + str(filename) + '\\n') for prd_lb, true_lb, indices in zip(prd,",
"if self.mix_reader.context: doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path",
"import pdist, squareform from sklearn.metrics import accuracy_score from pairwise_classifier import * class MixtureReader:",
"# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"doc_mix_len = len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs,",
"'nyt' in self.psc_clf.config['data_dir'] else 'wiki', context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0) self.out_file_path = psc_clf.config['out_file_path']",
"avg_acc = np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc if __name__ == \"__main__\":",
"= argparse.ArgumentParser() parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob',",
"'data_dir': args.data_dir, 'info_path': args.info_path, 'init_with_glove': args.init_with_glove, 'save_dir': args.save_dir, 'save_name': args.save_name, 'restore_dir': args.restore_dir, 'restore_name':",
"from pairwise_classifier import * class MixtureReader: def __init__(self, data_dir, data_type, context): assert data_type",
"data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir'] else 'wiki', context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0) self.out_file_path",
"numpy as np import tensorflow as tf from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper",
"itertools import chain, product from collections import defaultdict from kmedoids import kMedoids from",
"= np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc def evaluate_given(self, filenames, verbose=True): accs",
"'mutual_attention': args.mutual_attention, 'context': args.context, 'context_length': args.context_length, 'out_file_path': args.out_file_path} psc_clf = PairwiseSentenceClassifier(config) kmed =",
"type=str) parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context', type=bool)",
"dill.load(open(self.data_dir+filename, 'rb')) doc_lbs = [] for sentcode in doc_mix: if sentcode in da:",
"= dill.load(open(self.data_dir+filename, 'rb')) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len =",
"in filenames: if self.mix_reader.context: doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs,",
"type=float) parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size', type=int) parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path', type=str)",
"def evaluate_given(self, filenames, verbose=True): accs = [] if self.out_file_path is not None: #",
"doc_lbs class PscKMedoids: def __init__(self, psc_clf, data_type): self.psc_clf = psc_clf self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'],",
"ctx = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path is not None) else:",
"db, doc_mix = dill.load(open(self.data_dir+filename, 'rb')) doc_lbs = [] for sentcode in doc_mix: if",
"data_dir self.data_type = data_type self.context = context # int: 0 or context-length. def",
"'train_size': args.train_size, 'verbose': args.verbose, 'save_freq': args.save_freq, 'data_dir': args.data_dir, 'info_path': args.info_path, 'init_with_glove': args.init_with_glove, 'save_dir':",
"x1, x1_len = batch([x1]) x2, x2_len = batch([x2]) fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2,",
"from kmedoids import kMedoids from scipy.spatial.distance import pdist, squareform from sklearn.metrics import accuracy_score",
"psc_clf.config['out_file_path'] def __to_sentence(self, indices): words = [] for index in indices: word =",
"2) doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix)) acc = self.__clust_accuracy(doc_lbs, doc_prd) if return_pred: return acc,",
"index in indices: word = self.psc_clf.indexer.get_object(index) if word is None: words.append('UNK') else: words.append(word)",
"{cls:[datum_id, ...], ...} lbs = [0]*doc_len for idx in C[1]: lbs[idx] = 1",
"if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] = self.ctx conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return 1-conf[0] def evaluate_single(self,",
"0) self.out_file_path = psc_clf.config['out_file_path'] def __to_sentence(self, indices): words = [] for index in",
"['nyt', 'wiki'] self.data_dir = data_dir self.data_type = data_type self.context = context # int:",
"acc def evaluate_rand(self, k=100, verbose=True): accs = [] filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False)",
"return acc def evaluate_rand(self, k=100, verbose=True): accs = [] filenames = np.random.choice(self.psc_clf.FILENAMES, size=k,",
"'new_track': args.new_track, 'session_id': args.session_id, 'mutual_attention': args.mutual_attention, 'context': args.context, 'context_length': args.context_length, 'out_file_path': args.out_file_path} psc_clf",
"args.info_path, 'init_with_glove': args.init_with_glove, 'save_dir': args.save_dir, 'save_name': args.save_name, 'restore_dir': args.restore_dir, 'restore_name': args.restore_name, 'load_from_saved': args.load_from_saved,",
"'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate, 'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose, 'save_freq': args.save_freq, 'data_dir':",
"doc_lbs = dill.load(open(self.data_dir+filename, 'rb')) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len",
"else 'wiki', context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0) self.out_file_path = psc_clf.config['out_file_path'] def __to_sentence(self, indices):",
"= self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN",
"max(accuracy_score(true, pred), accuracy_score(true, self.__flip_clust(pred))) def __dist(self, x1, x2): x1, x1_len = batch([x1]) x2,",
"acc)) out_file.close() avg_acc = np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc def evaluate_given(self,",
"filenames: if self.mix_reader.context: doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, ctx,",
"or context-length. def get_mixture(self, filename): if self.data_type == 'nyt': return self.__get_nyt_mixture(filename) else: #",
"self.mix_reader.context: doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, ctx, self.out_file_path is",
"for sentcode in doc_mix: if sentcode in da: doc_lbs.append(0) else: doc_lbs.append(1) if self.context:",
"parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention',",
"import tensorflow as tf from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper from helpers import",
"argparse.ArgumentParser() parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob', type=float)",
"x2_len = batch([x2]) fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx]",
"out_file = open(self.out_file_path, 'w') for filename in filenames: if self.mix_reader.context: doc_mix, doc_lbs, ctx",
"product from collections import defaultdict from kmedoids import kMedoids from scipy.spatial.distance import pdist,",
"words.append(word) return ' '.join(words) def __to_labels(self, C, doc_len): # C: {cls:[datum_id, ...], ...}",
"self.ctx conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return 1-conf[0] def evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average',",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"idx in C[1]: lbs[idx] = 1 return lbs def __flip_clust(self, clust): return np.array([0",
"= batch([x2]) fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] =",
"checkpoint_model from itertools import chain, product from collections import defaultdict from kmedoids import",
"type=int) parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size', type=int) parser.add_argument('--verbose', type=int)",
"1 return lbs def __flip_clust(self, clust): return np.array([0 if i==1 else 1 for",
"= [0]*doc_len for idx in C[1]: lbs[idx] = 1 return lbs def __flip_clust(self,",
"np.mean(accs) print('\\nAverage accuracy = {}'.format(avg_acc)) return avg_acc def evaluate_given(self, filenames, verbose=True): accs =",
"self.__to_labels(doc_mix_clust, len(doc_mix)) acc = self.__clust_accuracy(doc_lbs, doc_prd) if return_pred: return acc, doc_prd return acc",
"'save_name': args.save_name, 'restore_dir': args.restore_dir, 'restore_name': args.restore_name, 'load_from_saved': args.load_from_saved, 'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id':",
"doc_mix, doc_lbs def __get_wiki_mixture(self, filename): doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb')) if self.context: CTX_LEN"
] |
[
"each one a 28x28 array of uint8. Each yield would generate the next",
"of units, and generate the one after that. # Subsequent generations would continue",
"while to read in the whole # file before it returns. def read_idx_file(filename,",
"case for the scalar situation, where re-shaping doesn't make sense. yield np.fromfile(f, dtype=dtypes[dte],",
"generate the one after that. # Subsequent generations would continue from that point.",
"rank-3 file would yield rank-2 arrays on each call, consisting of the 2",
"rank-3, with 60k units, each one a 28x28 array of uint8. Each yield",
"dtypesizes[dte]) read_units = dsizes[0] if count is None else min(dsizes[0], count) nshape =",
"return np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size * read_units)), newshape=nshape, order='C') # This version, on the",
"uint8. # The optional start index (defaults to 0), will skip the first",
"comes a uint32be number per dimension, yielding the size of the n-dimensional array",
"making it act just as a PNG file handle would. def to_filehandle(image): of",
"this may consume a lot of memory, and may take a while to",
"time. # A \"unit\" in this context is all the data except the",
"situation, where re-shaping doesn't make sense. yield np.fromfile(f, dtype=dtypes[dte], count=1)[0] # From an",
"# Subsequent generations would continue from that point. def read_idx_units(filename, start=0, count=None): #",
"'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte' # This function will read the",
"each call. # A rank-3 file would yield rank-2 arrays on each call,",
"images files, however, are rank-3, with 60k units, each one a 28x28 array",
"files, all labeled FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES",
"file would just yield individual scalars on each call. # A rank-3 file",
"files # first read a uint32be as the magic number, yielding data type",
"other hand, is a generator, reading one unit from the file at a",
"generator, reading one unit from the file at a time. # A \"unit\"",
"single uint8. Each yield would generate the next uint8. # The digits images",
"yield would generate the next 28x28 array of uint8. # The optional start",
"read_units) f.seek(seek_delta, 1) # So now, we can loop over the outer dimensions,",
"units, each one a 28x28 array of uint8. Each yield would generate the",
"# To make things concrete, the digits labels file is rank-1, with 60k",
"format), and number of dimensions dummy, = np.fromfile(f, dtype='>u2', count=1) dte,dims = np.fromfile(f,",
"read_idx_units(filename, start=0, count=None): # Helper map of type enum values to dtype strings",
"from the file at a time. # A \"unit\" in this context is",
"= 'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte' # This function will read the entire file",
"28x28 array of uint8. Each yield would generate the next 28x28 array of",
"# This version, on the other hand, is a generator, reading one unit",
"continue from that point. def read_idx_units(filename, start=0, count=None): # Helper map of type",
"'rb') as f: # Ok, let's parse one of these files # first",
"would yield rank-2 arrays on each call, consisting of the 2 lowest dimension.",
"big-endian. # The arrays are in C-style, where the last dimension index changes",
"of uint8. Each yield would generate the next 28x28 array of uint8. #",
"order='C') # This version, on the other hand, is a generator, reading one",
"order='C') else: # Special case for the scalar situation, where re-shaping doesn't make",
"None else min(dsizes[0], count) #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) for i",
"units, each one a single uint8. Each yield would generate the next uint8.",
"one go return np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size * read_units)), newshape=nshape, order='C') # This version,",
"data set files, all labeled FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS =",
"unit if len(dsizes) > 1: yield np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:], order='C') else: #",
"np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:], order='C') else: # Special case for the scalar situation,",
"array, # of the appropriate data type, rank, and dimensionality for the given",
"all in one go return np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size * read_units)), newshape=nshape, order='C') #",
"strings for numpy dtypes = {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes = {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8}",
"= 'data/mnist/train-images-idx3-ubyte' # This function will read the entire file in and return",
"the outer dimensions, setting the indexes appropriately, # and read the inner dimension",
"BytesIO filehandle and return that filehandle for later use, # making it act",
"generate the next 28x28 array of uint8. # The optional start index (defaults",
"the indexes appropriately, # and read the inner dimension as a vector all",
"generate the next uint8. # The digits images files, however, are rank-3, with",
"for later use, # making it act just as a PNG file handle",
"arrays on each call, consisting of the 2 lowest dimension. # To make",
"'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte' # This function will read the entire file in",
"# Helper map of type enum values to dtype strings for numpy dtypes",
"So now, we can loop over the outer dimensions, setting the indexes appropriately,",
"newshape=nshape, order='C') # This version, on the other hand, is a generator, reading",
"the data except the top-most dimension. # So, a rank-1 file would just",
"with open(filename, 'rb') as f: # Ok, let's parse one of these files",
"over the outer dimensions, setting the indexes appropriately, # and read the inner",
"uint8. # The digits images files, however, are rank-3, with 60k units, each",
"the inner dimension as a vector all in one go return np.reshape(np.fromfile(f, dtype=dtypes[dte],",
"FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte' # This function will read the entire file in and",
"count is None else min(dsizes[0], count) #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1)",
"number per dimension, yielding the size of the n-dimensional array in that dimension",
"return a single multi-dimensional array, # of the appropriate data type, rank, and",
"{8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8} with open(filename, 'rb') as f: # Ok, let's parse one",
"the top-most dimension. # So, a rank-1 file would just yield individual scalars",
"and dimensionality for the given file. # Note that this may consume a",
"one a 28x28 array of uint8. Each yield would generate the next 28x28",
"a single multi-dimensional array, # of the appropriate data type, rank, and dimensionality",
"f: # Ok, let's parse one of these files # first read a",
"value, and format), and number of dimensions dummy, = np.fromfile(f, dtype='>u2', count=1) dte,dims",
"inner dimension as a vector all in one go return np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size",
"# and read the inner dimension as a vector all in one go",
"most frequently. dsizes = np.fromfile(f, dtype='>u4', count=dims) unit_size = int(np.prod(dsizes[1:])) if len(dsizes) >",
"would generate the next uint8. # The digits images files, however, are rank-3,",
"is rank-1, with 60k units, each one a single uint8. Each yield would",
"are in C-style, where the last dimension index changes most frequently. dsizes =",
"# The optional start index (defaults to 0), will skip the first \"start\"",
"go return np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size * read_units)), newshape=nshape, order='C') # This version, on",
"A rank-3 file would yield rank-2 arrays on each call, consisting of the",
"for the given file. # Note that this may consume a lot of",
"C-style, where the last dimension index changes most frequently. dsizes = np.fromfile(f, dtype='>u4',",
"yield would generate the next uint8. # The digits images files, however, are",
"that dimension # Only after all those dimension sizes, comes the data, all",
"data into a BytesIO filehandle and return that filehandle for later use, #",
"as a PNG file handle would. def to_filehandle(image): of = io.BytesIO() ImageOps.invert(Image.fromarray(image)).convert(\"RGB\").save(of, \"PNG\")",
"the magic number, yielding data type (size of each value, and format), and",
"if count is None else min(dsizes[0], count) nshape = dsizes nshape[0] = read_units",
"digits images files, however, are rank-3, with 60k units, each one a 28x28",
"index (defaults to 0), will skip the first \"start\" count of units, and",
"and format), and number of dimensions dummy, = np.fromfile(f, dtype='>u2', count=1) dte,dims =",
"on each call, consisting of the 2 lowest dimension. # To make things",
"memory, and may take a while to read in the whole # file",
"data type, rank, and dimensionality for the given file. # Note that this",
"= int(np.prod(dsizes[1:])) if len(dsizes) > 1 else 1 seek_delta = int(start * unit_size",
"those dimension sizes, comes the data, all big-endian. # The arrays are in",
"a time. # A \"unit\" in this context is all the data except",
"9:1, 0xb:2,0xc:4,0xd:4,0xe:8} with open(filename, 'rb') as f: # Ok, let's parse one of",
"call. # A rank-3 file would yield rank-2 arrays on each call, consisting",
"in range(read_units): # Read the next unit if len(dsizes) > 1: yield np.reshape(np.fromfile(f,",
"count of units, and generate the one after that. # Subsequent generations would",
"can loop over the outer dimensions, setting the indexes appropriately, # and read",
"np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size * read_units)), newshape=nshape, order='C') # This version, on the other",
"point. def read_idx_units(filename, start=0, count=None): # Helper map of type enum values to",
"a PNG file handle would. def to_filehandle(image): of = io.BytesIO() ImageOps.invert(Image.fromarray(image)).convert(\"RGB\").save(of, \"PNG\") of.seek(0)",
"comes the data, all big-endian. # The arrays are in C-style, where the",
"dimensionality for the given file. # Note that this may consume a lot",
"units, and generate the one after that. # Subsequent generations would continue from",
"the n-dimensional array in that dimension # Only after all those dimension sizes,",
"data except the top-most dimension. # So, a rank-1 file would just yield",
"filehandle for later use, # making it act just as a PNG file",
"array of uint8. Each yield would generate the next 28x28 array of uint8.",
"one of these files # first read a uint32be as the magic number,",
"read_idx_file(filename, start=0, count=None): # Helper map of type enum values to dtype strings",
"make sense. yield np.fromfile(f, dtype=dtypes[dte], count=1)[0] # From an MNIST image, put the",
"sense. yield np.fromfile(f, dtype=dtypes[dte], count=1)[0] # From an MNIST image, put the data",
"# This function will read the entire file in and return a single",
"So, a rank-1 file would just yield individual scalars on each call. #",
"scalars on each call. # A rank-3 file would yield rank-2 arrays on",
"count=dims) unit_size = int(np.prod(dsizes[1:])) if len(dsizes) > 1 else 1 seek_delta = int(start",
"doesn't make sense. yield np.fromfile(f, dtype=dtypes[dte], count=1)[0] # From an MNIST image, put",
"dtype=dtypes[dte], count=1)[0] # From an MNIST image, put the data into a BytesIO",
"first read a uint32be as the magic number, yielding data type (size of",
"count) #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) for i in range(read_units): #",
"in and return a single multi-dimensional array, # of the appropriate data type,",
"it returns. def read_idx_file(filename, start=0, count=None): # Helper map of type enum values",
"and generate the one after that. # Subsequent generations would continue from that",
"# Only after all those dimension sizes, comes the data, all big-endian. #",
"and number of dimensions dummy, = np.fromfile(f, dtype='>u2', count=1) dte,dims = np.fromfile(f, dtype='>u1',",
"None else min(dsizes[0], count) nshape = dsizes nshape[0] = read_units #print(dsizes) #print(np.prod(dsizes), unit_size,",
"numpy dtypes = {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes = {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8} with open(filename,",
"read the inner dimension as a vector all in one go return np.reshape(np.fromfile(f,",
"from that point. def read_idx_units(filename, start=0, count=None): # Helper map of type enum",
"appropriate data type, rank, and dimensionality for the given file. # Note that",
"the entire file in and return a single multi-dimensional array, # of the",
"file before it returns. def read_idx_file(filename, start=0, count=None): # Helper map of type",
"the whole # file before it returns. def read_idx_file(filename, start=0, count=None): # Helper",
"Read the next unit if len(dsizes) > 1: yield np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:],",
"dtypesizes = {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8} with open(filename, 'rb') as f: # Ok, let's",
"= {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes = {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8} with open(filename, 'rb') as",
"next 28x28 array of uint8. # The optional start index (defaults to 0),",
"concrete, the digits labels file is rank-1, with 60k units, each one a",
"this context is all the data except the top-most dimension. # So, a",
"on each call. # A rank-3 file would yield rank-2 arrays on each",
"# From an MNIST image, put the data into a BytesIO filehandle and",
"later use, # making it act just as a PNG file handle would.",
"of dimensions dummy, = np.fromfile(f, dtype='>u2', count=1) dte,dims = np.fromfile(f, dtype='>u1', count=2) #print(dummy,",
"Only after all those dimension sizes, comes the data, all big-endian. # The",
"and may take a while to read in the whole # file before",
"nshape[0] = read_units #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) # So now,",
"all big-endian. # The arrays are in C-style, where the last dimension index",
"next uint8. # The digits images files, however, are rank-3, with 60k units,",
"just yield individual scalars on each call. # A rank-3 file would yield",
"optional start index (defaults to 0), will skip the first \"start\" count of",
"= dsizes nshape[0] = read_units #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) #",
"data, all big-endian. # The arrays are in C-style, where the last dimension",
"#print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) for i in range(read_units): # Read the",
"1: yield np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:], order='C') else: # Special case for the",
"'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte' # This function",
"the next unit if len(dsizes) > 1: yield np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:], order='C')",
"dimensions dummy, = np.fromfile(f, dtype='>u2', count=1) dte,dims = np.fromfile(f, dtype='>u1', count=2) #print(dummy, dte,",
"the data into a BytesIO filehandle and return that filehandle for later use,",
"the last dimension index changes most frequently. dsizes = np.fromfile(f, dtype='>u4', count=dims) unit_size",
"for the scalar situation, where re-shaping doesn't make sense. yield np.fromfile(f, dtype=dtypes[dte], count=1)[0]",
"consume a lot of memory, and may take a while to read in",
"appropriately, # and read the inner dimension as a vector all in one",
"would continue from that point. def read_idx_units(filename, start=0, count=None): # Helper map of",
"count=1)[0] # From an MNIST image, put the data into a BytesIO filehandle",
"if count is None else min(dsizes[0], count) #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta,",
"last dimension index changes most frequently. dsizes = np.fromfile(f, dtype='>u4', count=dims) unit_size =",
"for numpy dtypes = {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes = {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8} with",
"digits labels file is rank-1, with 60k units, each one a single uint8.",
"labeled FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte'",
"Each yield would generate the next 28x28 array of uint8. # The optional",
"yield np.fromfile(f, dtype=dtypes[dte], count=1)[0] # From an MNIST image, put the data into",
"MNIST image, put the data into a BytesIO filehandle and return that filehandle",
"FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte' # This function will read the entire",
"would generate the next 28x28 array of uint8. # The optional start index",
"* unit_size * dtypesizes[dte]) read_units = dsizes[0] if count is None else min(dsizes[0],",
"# The digits images files, however, are rank-3, with 60k units, each one",
"import io # Training and test data set files, all labeled FN_TEST_LABELS =",
"'data/mnist/train-images-idx3-ubyte' # This function will read the entire file in and return a",
"= np.fromfile(f, dtype='>u1', count=2) #print(dummy, dte, dtypes[dte], dtypesizes[dte], dims) # Then comes a",
"the next 28x28 array of uint8. # The optional start index (defaults to",
"put the data into a BytesIO filehandle and return that filehandle for later",
"# making it act just as a PNG file handle would. def to_filehandle(image):",
"given file. # Note that this may consume a lot of memory, and",
"individual scalars on each call. # A rank-3 file would yield rank-2 arrays",
"uint8. Each yield would generate the next 28x28 array of uint8. # The",
"open(filename, 'rb') as f: # Ok, let's parse one of these files #",
"one unit from the file at a time. # A \"unit\" in this",
"# Then comes a uint32be number per dimension, yielding the size of the",
"unit_size * dtypesizes[dte]) read_units = dsizes[0] if count is None else min(dsizes[0], count)",
"# A \"unit\" in this context is all the data except the top-most",
"with 60k units, each one a 28x28 array of uint8. Each yield would",
"rank, and dimensionality for the given file. # Note that this may consume",
"dtypes[dte], dtypesizes[dte], dims) # Then comes a uint32be number per dimension, yielding the",
"dimension, yielding the size of the n-dimensional array in that dimension # Only",
"#print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) # So now, we can loop",
"#print(dummy, dte, dtypes[dte], dtypesizes[dte], dims) # Then comes a uint32be number per dimension,",
"of each value, and format), and number of dimensions dummy, = np.fromfile(f, dtype='>u2',",
"are rank-3, with 60k units, each one a 28x28 array of uint8. Each",
"at a time. # A \"unit\" in this context is all the data",
"changes most frequently. dsizes = np.fromfile(f, dtype='>u4', count=dims) unit_size = int(np.prod(dsizes[1:])) if len(dsizes)",
"now, we can loop over the outer dimensions, setting the indexes appropriately, #",
"The arrays are in C-style, where the last dimension index changes most frequently.",
"a uint32be as the magic number, yielding data type (size of each value,",
"file. # Note that this may consume a lot of memory, and may",
"and read the inner dimension as a vector all in one go return",
"yielding data type (size of each value, and format), and number of dimensions",
"skip the first \"start\" count of units, and generate the one after that.",
"#print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) for i in range(read_units): # Read",
"all those dimension sizes, comes the data, all big-endian. # The arrays are",
"number, yielding data type (size of each value, and format), and number of",
"one a single uint8. Each yield would generate the next uint8. # The",
"np from PIL import Image, ImageOps import io # Training and test data",
"files, however, are rank-3, with 60k units, each one a 28x28 array of",
"file would yield rank-2 arrays on each call, consisting of the 2 lowest",
"(defaults to 0), will skip the first \"start\" count of units, and generate",
"newshape=dsizes[1:], order='C') else: # Special case for the scalar situation, where re-shaping doesn't",
"rank-2 arrays on each call, consisting of the 2 lowest dimension. # To",
"first \"start\" count of units, and generate the one after that. # Subsequent",
"\"unit\" in this context is all the data except the top-most dimension. #",
"a generator, reading one unit from the file at a time. # A",
"dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:], order='C') else: # Special case for the scalar situation, where",
"a 28x28 array of uint8. Each yield would generate the next 28x28 array",
"start index (defaults to 0), will skip the first \"start\" count of units,",
"dsizes nshape[0] = read_units #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) # So",
"returns. def read_idx_file(filename, start=0, count=None): # Helper map of type enum values to",
"is a generator, reading one unit from the file at a time. #",
"import Image, ImageOps import io # Training and test data set files, all",
"dims) # Then comes a uint32be number per dimension, yielding the size of",
"context is all the data except the top-most dimension. # So, a rank-1",
"after all those dimension sizes, comes the data, all big-endian. # The arrays",
"in that dimension # Only after all those dimension sizes, comes the data,",
"in the whole # file before it returns. def read_idx_file(filename, start=0, count=None): #",
"re-shaping doesn't make sense. yield np.fromfile(f, dtype=dtypes[dte], count=1)[0] # From an MNIST image,",
"import numpy as np from PIL import Image, ImageOps import io # Training",
"dtypesizes[dte], dims) # Then comes a uint32be number per dimension, yielding the size",
"count=None): # Helper map of type enum values to dtype strings for numpy",
"else min(dsizes[0], count) nshape = dsizes nshape[0] = read_units #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta,",
"the digits labels file is rank-1, with 60k units, each one a single",
"all labeled FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES =",
"Note that this may consume a lot of memory, and may take a",
"a while to read in the whole # file before it returns. def",
"> 1: yield np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:], order='C') else: # Special case for",
"generations would continue from that point. def read_idx_units(filename, start=0, count=None): # Helper map",
"may take a while to read in the whole # file before it",
"hand, is a generator, reading one unit from the file at a time.",
"else min(dsizes[0], count) #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) for i in",
"a rank-1 file would just yield individual scalars on each call. # A",
"read the entire file in and return a single multi-dimensional array, # of",
"unit_size, seek_delta, read_units) f.seek(seek_delta, 1) for i in range(read_units): # Read the next",
"for i in range(read_units): # Read the next unit if len(dsizes) > 1:",
"count=int(unit_size * read_units)), newshape=nshape, order='C') # This version, on the other hand, is",
"whole # file before it returns. def read_idx_file(filename, start=0, count=None): # Helper map",
"however, are rank-3, with 60k units, each one a 28x28 array of uint8.",
"seek_delta, read_units) f.seek(seek_delta, 1) for i in range(read_units): # Read the next unit",
"1) # So now, we can loop over the outer dimensions, setting the",
"yield rank-2 arrays on each call, consisting of the 2 lowest dimension. #",
"= int(start * unit_size * dtypesizes[dte]) read_units = dsizes[0] if count is None",
"This function will read the entire file in and return a single multi-dimensional",
"a vector all in one go return np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size * read_units)), newshape=nshape,",
"# Ok, let's parse one of these files # first read a uint32be",
"file is rank-1, with 60k units, each one a single uint8. Each yield",
"dsizes = np.fromfile(f, dtype='>u4', count=dims) unit_size = int(np.prod(dsizes[1:])) if len(dsizes) > 1 else",
"= 'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte' # This",
"np.fromfile(f, dtype='>u2', count=1) dte,dims = np.fromfile(f, dtype='>u1', count=2) #print(dummy, dte, dtypes[dte], dtypesizes[dte], dims)",
"things concrete, the digits labels file is rank-1, with 60k units, each one",
"reading one unit from the file at a time. # A \"unit\" in",
"from PIL import Image, ImageOps import io # Training and test data set",
"arrays are in C-style, where the last dimension index changes most frequently. dsizes",
"lowest dimension. # To make things concrete, the digits labels file is rank-1,",
"= dsizes[0] if count is None else min(dsizes[0], count) nshape = dsizes nshape[0]",
"This version, on the other hand, is a generator, reading one unit from",
"the other hand, is a generator, reading one unit from the file at",
"these files # first read a uint32be as the magic number, yielding data",
"frequently. dsizes = np.fromfile(f, dtype='>u4', count=dims) unit_size = int(np.prod(dsizes[1:])) if len(dsizes) > 1",
"# Training and test data set files, all labeled FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES",
"60k units, each one a 28x28 array of uint8. Each yield would generate",
"dsizes[0] if count is None else min(dsizes[0], count) #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units)",
"count=unit_size), newshape=dsizes[1:], order='C') else: # Special case for the scalar situation, where re-shaping",
"Subsequent generations would continue from that point. def read_idx_units(filename, start=0, count=None): # Helper",
"nshape = dsizes nshape[0] = read_units #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1)",
"int(start * unit_size * dtypesizes[dte]) read_units = dsizes[0] if count is None else",
"file at a time. # A \"unit\" in this context is all the",
"read_units) f.seek(seek_delta, 1) for i in range(read_units): # Read the next unit if",
"of the appropriate data type, rank, and dimensionality for the given file. #",
"of the 2 lowest dimension. # To make things concrete, the digits labels",
"read_units = dsizes[0] if count is None else min(dsizes[0], count) #print(dsizes) #print(np.prod(dsizes), unit_size,",
"f.seek(seek_delta, 1) for i in range(read_units): # Read the next unit if len(dsizes)",
"per dimension, yielding the size of the n-dimensional array in that dimension #",
"f.seek(seek_delta, 1) # So now, we can loop over the outer dimensions, setting",
"unit_size = int(np.prod(dsizes[1:])) if len(dsizes) > 1 else 1 seek_delta = int(start *",
"that this may consume a lot of memory, and may take a while",
"in C-style, where the last dimension index changes most frequently. dsizes = np.fromfile(f,",
"the file at a time. # A \"unit\" in this context is all",
"Training and test data set files, all labeled FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES =",
"= {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8} with open(filename, 'rb') as f: # Ok, let's parse",
"and test data set files, all labeled FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte'",
"yielding the size of the n-dimensional array in that dimension # Only after",
"the size of the n-dimensional array in that dimension # Only after all",
"next unit if len(dsizes) > 1: yield np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:], order='C') else:",
"dimension. # So, a rank-1 file would just yield individual scalars on each",
"vector all in one go return np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size * read_units)), newshape=nshape, order='C')",
"Image, ImageOps import io # Training and test data set files, all labeled",
"60k units, each one a single uint8. Each yield would generate the next",
"dtype='>u4', count=dims) unit_size = int(np.prod(dsizes[1:])) if len(dsizes) > 1 else 1 seek_delta =",
"Each yield would generate the next uint8. # The digits images files, however,",
"before it returns. def read_idx_file(filename, start=0, count=None): # Helper map of type enum",
"rank-1, with 60k units, each one a single uint8. Each yield would generate",
"top-most dimension. # So, a rank-1 file would just yield individual scalars on",
"min(dsizes[0], count) nshape = dsizes nshape[0] = read_units #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units)",
"2 lowest dimension. # To make things concrete, the digits labels file is",
"multi-dimensional array, # of the appropriate data type, rank, and dimensionality for the",
"setting the indexes appropriately, # and read the inner dimension as a vector",
"The digits images files, however, are rank-3, with 60k units, each one a",
"# Special case for the scalar situation, where re-shaping doesn't make sense. yield",
"act just as a PNG file handle would. def to_filehandle(image): of = io.BytesIO()",
"dtype=dtypes[dte], count=int(unit_size * read_units)), newshape=nshape, order='C') # This version, on the other hand,",
"that filehandle for later use, # making it act just as a PNG",
"np.fromfile(f, dtype=dtypes[dte], count=1)[0] # From an MNIST image, put the data into a",
"is all the data except the top-most dimension. # So, a rank-1 file",
"parse one of these files # first read a uint32be as the magic",
"Then comes a uint32be number per dimension, yielding the size of the n-dimensional",
"call, consisting of the 2 lowest dimension. # To make things concrete, the",
"def read_idx_file(filename, start=0, count=None): # Helper map of type enum values to dtype",
"# Note that this may consume a lot of memory, and may take",
"may consume a lot of memory, and may take a while to read",
"if len(dsizes) > 1 else 1 seek_delta = int(start * unit_size * dtypesizes[dte])",
"size of the n-dimensional array in that dimension # Only after all those",
"0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes = {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8} with open(filename, 'rb') as f: # Ok,",
"dimension index changes most frequently. dsizes = np.fromfile(f, dtype='>u4', count=dims) unit_size = int(np.prod(dsizes[1:]))",
"dsizes[0] if count is None else min(dsizes[0], count) nshape = dsizes nshape[0] =",
"scalar situation, where re-shaping doesn't make sense. yield np.fromfile(f, dtype=dtypes[dte], count=1)[0] # From",
"# Read the next unit if len(dsizes) > 1: yield np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size),",
"sizes, comes the data, all big-endian. # The arrays are in C-style, where",
"count=2) #print(dummy, dte, dtypes[dte], dtypesizes[dte], dims) # Then comes a uint32be number per",
"# first read a uint32be as the magic number, yielding data type (size",
"function will read the entire file in and return a single multi-dimensional array,",
"(size of each value, and format), and number of dimensions dummy, = np.fromfile(f,",
"= dsizes[0] if count is None else min(dsizes[0], count) #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta,",
"ImageOps import io # Training and test data set files, all labeled FN_TEST_LABELS",
"let's parse one of these files # first read a uint32be as the",
"unit_size, seek_delta, read_units) f.seek(seek_delta, 1) # So now, we can loop over the",
"The optional start index (defaults to 0), will skip the first \"start\" count",
"dtype strings for numpy dtypes = {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes = {8:1, 9:1,",
"dtype='>u2', count=1) dte,dims = np.fromfile(f, dtype='>u1', count=2) #print(dummy, dte, dtypes[dte], dtypesizes[dte], dims) #",
"consisting of the 2 lowest dimension. # To make things concrete, the digits",
"* dtypesizes[dte]) read_units = dsizes[0] if count is None else min(dsizes[0], count) nshape",
"dimensions, setting the indexes appropriately, # and read the inner dimension as a",
"Helper map of type enum values to dtype strings for numpy dtypes =",
"that point. def read_idx_units(filename, start=0, count=None): # Helper map of type enum values",
"unit from the file at a time. # A \"unit\" in this context",
"type enum values to dtype strings for numpy dtypes = {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'}",
"of memory, and may take a while to read in the whole #",
"of type enum values to dtype strings for numpy dtypes = {8:'>u1', 9:'>i1',",
"seek_delta, read_units) f.seek(seek_delta, 1) # So now, we can loop over the outer",
"filehandle and return that filehandle for later use, # making it act just",
"except the top-most dimension. # So, a rank-1 file would just yield individual",
"{8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes = {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8} with open(filename, 'rb') as f:",
"A \"unit\" in this context is all the data except the top-most dimension.",
"after that. # Subsequent generations would continue from that point. def read_idx_units(filename, start=0,",
"28x28 array of uint8. # The optional start index (defaults to 0), will",
"= np.fromfile(f, dtype='>u4', count=dims) unit_size = int(np.prod(dsizes[1:])) if len(dsizes) > 1 else 1",
"where re-shaping doesn't make sense. yield np.fromfile(f, dtype=dtypes[dte], count=1)[0] # From an MNIST",
"seek_delta = int(start * unit_size * dtypesizes[dte]) read_units = dsizes[0] if count is",
"a BytesIO filehandle and return that filehandle for later use, # making it",
"start=0, count=None): # Helper map of type enum values to dtype strings for",
"FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte' # This function will",
"as a vector all in one go return np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size * read_units)),",
"the data, all big-endian. # The arrays are in C-style, where the last",
"read in the whole # file before it returns. def read_idx_file(filename, start=0, count=None):",
"the 2 lowest dimension. # To make things concrete, the digits labels file",
"# So now, we can loop over the outer dimensions, setting the indexes",
"type (size of each value, and format), and number of dimensions dummy, =",
"From an MNIST image, put the data into a BytesIO filehandle and return",
"on the other hand, is a generator, reading one unit from the file",
"and return that filehandle for later use, # making it act just as",
"entire file in and return a single multi-dimensional array, # of the appropriate",
"def read_idx_units(filename, start=0, count=None): # Helper map of type enum values to dtype",
"of uint8. # The optional start index (defaults to 0), will skip the",
"to dtype strings for numpy dtypes = {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes = {8:1,",
"array in that dimension # Only after all those dimension sizes, comes the",
"dte, dtypes[dte], dtypesizes[dte], dims) # Then comes a uint32be number per dimension, yielding",
"9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes = {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8} with open(filename, 'rb') as f: #",
"dimension # Only after all those dimension sizes, comes the data, all big-endian.",
"1 else 1 seek_delta = int(start * unit_size * dtypesizes[dte]) read_units = dsizes[0]",
"indexes appropriately, # and read the inner dimension as a vector all in",
"is None else min(dsizes[0], count) nshape = dsizes nshape[0] = read_units #print(dsizes) #print(np.prod(dsizes),",
"the given file. # Note that this may consume a lot of memory,",
"file in and return a single multi-dimensional array, # of the appropriate data",
"the first \"start\" count of units, and generate the one after that. #",
"<reponame>natashadsilva/sample.edge-mnist-notebook import numpy as np from PIL import Image, ImageOps import io #",
"numpy as np from PIL import Image, ImageOps import io # Training and",
"each value, and format), and number of dimensions dummy, = np.fromfile(f, dtype='>u2', count=1)",
"where the last dimension index changes most frequently. dsizes = np.fromfile(f, dtype='>u4', count=dims)",
"int(np.prod(dsizes[1:])) if len(dsizes) > 1 else 1 seek_delta = int(start * unit_size *",
"file handle would. def to_filehandle(image): of = io.BytesIO() ImageOps.invert(Image.fromarray(image)).convert(\"RGB\").save(of, \"PNG\") of.seek(0) return of",
"len(dsizes) > 1: yield np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:], order='C') else: # Special case",
"min(dsizes[0], count) #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) for i in range(read_units):",
"into a BytesIO filehandle and return that filehandle for later use, # making",
"one after that. # Subsequent generations would continue from that point. def read_idx_units(filename,",
"n-dimensional array in that dimension # Only after all those dimension sizes, comes",
"we can loop over the outer dimensions, setting the indexes appropriately, # and",
"else: # Special case for the scalar situation, where re-shaping doesn't make sense.",
"make things concrete, the digits labels file is rank-1, with 60k units, each",
"as np from PIL import Image, ImageOps import io # Training and test",
"io # Training and test data set files, all labeled FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte'",
"i in range(read_units): # Read the next unit if len(dsizes) > 1: yield",
"= 'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte' # This function will read",
"dtype='>u1', count=2) #print(dummy, dte, dtypes[dte], dtypesizes[dte], dims) # Then comes a uint32be number",
"uint8. Each yield would generate the next uint8. # The digits images files,",
"index changes most frequently. dsizes = np.fromfile(f, dtype='>u4', count=dims) unit_size = int(np.prod(dsizes[1:])) if",
"dummy, = np.fromfile(f, dtype='>u2', count=1) dte,dims = np.fromfile(f, dtype='>u1', count=2) #print(dummy, dte, dtypes[dte],",
"as f: # Ok, let's parse one of these files # first read",
"yield np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:], order='C') else: # Special case for the scalar",
"and return a single multi-dimensional array, # of the appropriate data type, rank,",
"* read_units)), newshape=nshape, order='C') # This version, on the other hand, is a",
"the one after that. # Subsequent generations would continue from that point. def",
"> 1 else 1 seek_delta = int(start * unit_size * dtypesizes[dte]) read_units =",
"Ok, let's parse one of these files # first read a uint32be as",
"of these files # first read a uint32be as the magic number, yielding",
"uint32be number per dimension, yielding the size of the n-dimensional array in that",
"version, on the other hand, is a generator, reading one unit from the",
"would just yield individual scalars on each call. # A rank-3 file would",
"dimension as a vector all in one go return np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size *",
"* dtypesizes[dte]) read_units = dsizes[0] if count is None else min(dsizes[0], count) #print(dsizes)",
"just as a PNG file handle would. def to_filehandle(image): of = io.BytesIO() ImageOps.invert(Image.fromarray(image)).convert(\"RGB\").save(of,",
"range(read_units): # Read the next unit if len(dsizes) > 1: yield np.reshape(np.fromfile(f, dtype=dtypes[dte],",
"labels file is rank-1, with 60k units, each one a single uint8. Each",
"if len(dsizes) > 1: yield np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:], order='C') else: # Special",
"read a uint32be as the magic number, yielding data type (size of each",
"lot of memory, and may take a while to read in the whole",
"uint32be as the magic number, yielding data type (size of each value, and",
"1 seek_delta = int(start * unit_size * dtypesizes[dte]) read_units = dsizes[0] if count",
"that. # Subsequent generations would continue from that point. def read_idx_units(filename, start=0, count=None):",
"a uint32be number per dimension, yielding the size of the n-dimensional array in",
"all the data except the top-most dimension. # So, a rank-1 file would",
"\"start\" count of units, and generate the one after that. # Subsequent generations",
"of the n-dimensional array in that dimension # Only after all those dimension",
"yield individual scalars on each call. # A rank-3 file would yield rank-2",
"dimension. # To make things concrete, the digits labels file is rank-1, with",
"# file before it returns. def read_idx_file(filename, start=0, count=None): # Helper map of",
"will read the entire file in and return a single multi-dimensional array, #",
"will skip the first \"start\" count of units, and generate the one after",
"= read_units #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) # So now, we",
"# The arrays are in C-style, where the last dimension index changes most",
"read_units #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) # So now, we can",
"# So, a rank-1 file would just yield individual scalars on each call.",
"each one a single uint8. Each yield would generate the next uint8. #",
"use, # making it act just as a PNG file handle would. def",
"# of the appropriate data type, rank, and dimensionality for the given file.",
"read_units)), newshape=nshape, order='C') # This version, on the other hand, is a generator,",
"PIL import Image, ImageOps import io # Training and test data set files,",
"dte,dims = np.fromfile(f, dtype='>u1', count=2) #print(dummy, dte, dtypes[dte], dtypesizes[dte], dims) # Then comes",
"read_units = dsizes[0] if count is None else min(dsizes[0], count) nshape = dsizes",
"the appropriate data type, rank, and dimensionality for the given file. # Note",
"the scalar situation, where re-shaping doesn't make sense. yield np.fromfile(f, dtype=dtypes[dte], count=1)[0] #",
"type, rank, and dimensionality for the given file. # Note that this may",
"is None else min(dsizes[0], count) #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) for",
"test data set files, all labeled FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS",
"outer dimensions, setting the indexes appropriately, # and read the inner dimension as",
"to 0), will skip the first \"start\" count of units, and generate the",
"take a while to read in the whole # file before it returns.",
"count=1) dte,dims = np.fromfile(f, dtype='>u1', count=2) #print(dummy, dte, dtypes[dte], dtypesizes[dte], dims) # Then",
"image, put the data into a BytesIO filehandle and return that filehandle for",
"magic number, yielding data type (size of each value, and format), and number",
"an MNIST image, put the data into a BytesIO filehandle and return that",
"array of uint8. # The optional start index (defaults to 0), will skip",
"each call, consisting of the 2 lowest dimension. # To make things concrete,",
"map of type enum values to dtype strings for numpy dtypes = {8:'>u1',",
"0), will skip the first \"start\" count of units, and generate the one",
"a lot of memory, and may take a while to read in the",
"len(dsizes) > 1 else 1 seek_delta = int(start * unit_size * dtypesizes[dte]) read_units",
"#print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta, 1) # So now, we can loop over",
"as the magic number, yielding data type (size of each value, and format),",
"loop over the outer dimensions, setting the indexes appropriately, # and read the",
"To make things concrete, the digits labels file is rank-1, with 60k units,",
"it act just as a PNG file handle would. def to_filehandle(image): of =",
"in one go return np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size * read_units)), newshape=nshape, order='C') # This",
"FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte' FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte' #",
"to read in the whole # file before it returns. def read_idx_file(filename, start=0,",
"values to dtype strings for numpy dtypes = {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes =",
"Special case for the scalar situation, where re-shaping doesn't make sense. yield np.fromfile(f,",
"else 1 seek_delta = int(start * unit_size * dtypesizes[dte]) read_units = dsizes[0] if",
"rank-1 file would just yield individual scalars on each call. # A rank-3",
"with 60k units, each one a single uint8. Each yield would generate the",
"count is None else min(dsizes[0], count) nshape = dsizes nshape[0] = read_units #print(dsizes)",
"number of dimensions dummy, = np.fromfile(f, dtype='>u2', count=1) dte,dims = np.fromfile(f, dtype='>u1', count=2)",
"in this context is all the data except the top-most dimension. # So,",
"= np.fromfile(f, dtype='>u2', count=1) dte,dims = np.fromfile(f, dtype='>u1', count=2) #print(dummy, dte, dtypes[dte], dtypesizes[dte],",
"a single uint8. Each yield would generate the next uint8. # The digits",
"data type (size of each value, and format), and number of dimensions dummy,",
"1) for i in range(read_units): # Read the next unit if len(dsizes) >",
"0xb:2,0xc:4,0xd:4,0xe:8} with open(filename, 'rb') as f: # Ok, let's parse one of these",
"the next uint8. # The digits images files, however, are rank-3, with 60k",
"count) nshape = dsizes nshape[0] = read_units #print(dsizes) #print(np.prod(dsizes), unit_size, seek_delta, read_units) f.seek(seek_delta,",
"dtypesizes[dte]) read_units = dsizes[0] if count is None else min(dsizes[0], count) #print(dsizes) #print(np.prod(dsizes),",
"dtypes = {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes = {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8} with open(filename, 'rb')",
"enum values to dtype strings for numpy dtypes = {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'} dtypesizes",
"return that filehandle for later use, # making it act just as a",
"PNG file handle would. def to_filehandle(image): of = io.BytesIO() ImageOps.invert(Image.fromarray(image)).convert(\"RGB\").save(of, \"PNG\") of.seek(0) return",
"dimension sizes, comes the data, all big-endian. # The arrays are in C-style,",
"np.fromfile(f, dtype='>u4', count=dims) unit_size = int(np.prod(dsizes[1:])) if len(dsizes) > 1 else 1 seek_delta",
"set files, all labeled FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte' FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte' FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte'",
"np.fromfile(f, dtype='>u1', count=2) #print(dummy, dte, dtypes[dte], dtypesizes[dte], dims) # Then comes a uint32be",
"single multi-dimensional array, # of the appropriate data type, rank, and dimensionality for",
"# A rank-3 file would yield rank-2 arrays on each call, consisting of"
] |
[
"select_review_by_id(conn: SAConn, pk: int) -> RowProxy: cursor = await conn.execute( review.select().where(review.c.id == pk)",
"-> RowProxy: cursor = await conn.execute( review.select().where(review.c.id == pk) ) item = await",
"str) ->RowProxy: cursor = await conn.execute( review.insert().values( { 'text': text, } ) )",
"import SAConnection as SAConn from aiopg.sa.result import RowProxy from info.reviews.tables import review async",
"RowProxy from info.reviews.tables import review async def select_review_by_id(conn: SAConn, pk: int) -> RowProxy:",
"item = await cursor.fetchone() return item async def create_review(conn: SAConn, text: str) ->RowProxy:",
"text: str) ->RowProxy: cursor = await conn.execute( review.insert().values( { 'text': text, } )",
"= await cursor.fetchone() return item async def create_review(conn: SAConn, text: str) ->RowProxy: cursor",
"pk: int) -> RowProxy: cursor = await conn.execute( review.select().where(review.c.id == pk) ) item",
"pk) ) item = await cursor.fetchone() return item async def create_review(conn: SAConn, text:",
"create_review(conn: SAConn, text: str) ->RowProxy: cursor = await conn.execute( review.insert().values( { 'text': text,",
"await cursor.fetchone() return item async def create_review(conn: SAConn, text: str) ->RowProxy: cursor =",
"await conn.execute( review.insert().values( { 'text': text, } ) ) item = await cursor.fetchone()",
"<gh_stars>1-10 from aiopg.sa import SAConnection as SAConn from aiopg.sa.result import RowProxy from info.reviews.tables",
"review async def select_review_by_id(conn: SAConn, pk: int) -> RowProxy: cursor = await conn.execute(",
"conn.execute( review.select().where(review.c.id == pk) ) item = await cursor.fetchone() return item async def",
"== pk) ) item = await cursor.fetchone() return item async def create_review(conn: SAConn,",
") item = await cursor.fetchone() return item async def create_review(conn: SAConn, text: str)",
"SAConn, text: str) ->RowProxy: cursor = await conn.execute( review.insert().values( { 'text': text, }",
"= await conn.execute( review.insert().values( { 'text': text, } ) ) item = await",
"from aiopg.sa.result import RowProxy from info.reviews.tables import review async def select_review_by_id(conn: SAConn, pk:",
"= await conn.execute( review.select().where(review.c.id == pk) ) item = await cursor.fetchone() return item",
"int) -> RowProxy: cursor = await conn.execute( review.select().where(review.c.id == pk) ) item =",
"cursor = await conn.execute( review.select().where(review.c.id == pk) ) item = await cursor.fetchone() return",
"SAConn from aiopg.sa.result import RowProxy from info.reviews.tables import review async def select_review_by_id(conn: SAConn,",
"from aiopg.sa import SAConnection as SAConn from aiopg.sa.result import RowProxy from info.reviews.tables import",
"return item async def create_review(conn: SAConn, text: str) ->RowProxy: cursor = await conn.execute(",
"->RowProxy: cursor = await conn.execute( review.insert().values( { 'text': text, } ) ) item",
"conn.execute( review.insert().values( { 'text': text, } ) ) item = await cursor.fetchone() return",
"cursor.fetchone() return item async def create_review(conn: SAConn, text: str) ->RowProxy: cursor = await",
"RowProxy: cursor = await conn.execute( review.select().where(review.c.id == pk) ) item = await cursor.fetchone()",
"from info.reviews.tables import review async def select_review_by_id(conn: SAConn, pk: int) -> RowProxy: cursor",
"cursor = await conn.execute( review.insert().values( { 'text': text, } ) ) item =",
"review.insert().values( { 'text': text, } ) ) item = await cursor.fetchone() return item.id",
"import RowProxy from info.reviews.tables import review async def select_review_by_id(conn: SAConn, pk: int) ->",
"aiopg.sa import SAConnection as SAConn from aiopg.sa.result import RowProxy from info.reviews.tables import review",
"async def create_review(conn: SAConn, text: str) ->RowProxy: cursor = await conn.execute( review.insert().values( {",
"SAConn, pk: int) -> RowProxy: cursor = await conn.execute( review.select().where(review.c.id == pk) )",
"await conn.execute( review.select().where(review.c.id == pk) ) item = await cursor.fetchone() return item async",
"def select_review_by_id(conn: SAConn, pk: int) -> RowProxy: cursor = await conn.execute( review.select().where(review.c.id ==",
"review.select().where(review.c.id == pk) ) item = await cursor.fetchone() return item async def create_review(conn:",
"SAConnection as SAConn from aiopg.sa.result import RowProxy from info.reviews.tables import review async def",
"async def select_review_by_id(conn: SAConn, pk: int) -> RowProxy: cursor = await conn.execute( review.select().where(review.c.id",
"aiopg.sa.result import RowProxy from info.reviews.tables import review async def select_review_by_id(conn: SAConn, pk: int)",
"item async def create_review(conn: SAConn, text: str) ->RowProxy: cursor = await conn.execute( review.insert().values(",
"as SAConn from aiopg.sa.result import RowProxy from info.reviews.tables import review async def select_review_by_id(conn:",
"def create_review(conn: SAConn, text: str) ->RowProxy: cursor = await conn.execute( review.insert().values( { 'text':",
"info.reviews.tables import review async def select_review_by_id(conn: SAConn, pk: int) -> RowProxy: cursor =",
"import review async def select_review_by_id(conn: SAConn, pk: int) -> RowProxy: cursor = await"
] |
[
"from models import Team, User def get_team_user(team_name, username): team = s.query(Team).filter_by(name=team_name).first() user =",
"import s from models import Team, User def get_team_user(team_name, username): team = s.query(Team).filter_by(name=team_name).first()",
"User def get_team_user(team_name, username): team = s.query(Team).filter_by(name=team_name).first() user = s.query(User).filter_by(username=username).first() return team, user",
"team = s.query(Team).filter_by(name=team_name).first() user = s.query(User).filter_by(username=username).first() return team, user def get_team(team_name): team =",
"user = s.query(User).filter_by(username=username).first() return team, user def get_team(team_name): team = s.query(Team).filter_by(name=team_name).first() return team",
"get_team_user(team_name, username): team = s.query(Team).filter_by(name=team_name).first() user = s.query(User).filter_by(username=username).first() return team, user def get_team(team_name):",
"crud import s from models import Team, User def get_team_user(team_name, username): team =",
"s from models import Team, User def get_team_user(team_name, username): team = s.query(Team).filter_by(name=team_name).first() user",
"def get_team_user(team_name, username): team = s.query(Team).filter_by(name=team_name).first() user = s.query(User).filter_by(username=username).first() return team, user def",
"from crud import s from models import Team, User def get_team_user(team_name, username): team",
"models import Team, User def get_team_user(team_name, username): team = s.query(Team).filter_by(name=team_name).first() user = s.query(User).filter_by(username=username).first()",
"username): team = s.query(Team).filter_by(name=team_name).first() user = s.query(User).filter_by(username=username).first() return team, user def get_team(team_name): team",
"<reponame>pastorhudson/mtb-pykeybasebot from crud import s from models import Team, User def get_team_user(team_name, username):",
"= s.query(Team).filter_by(name=team_name).first() user = s.query(User).filter_by(username=username).first() return team, user def get_team(team_name): team = s.query(Team).filter_by(name=team_name).first()",
"import Team, User def get_team_user(team_name, username): team = s.query(Team).filter_by(name=team_name).first() user = s.query(User).filter_by(username=username).first() return",
"s.query(Team).filter_by(name=team_name).first() user = s.query(User).filter_by(username=username).first() return team, user def get_team(team_name): team = s.query(Team).filter_by(name=team_name).first() return",
"Team, User def get_team_user(team_name, username): team = s.query(Team).filter_by(name=team_name).first() user = s.query(User).filter_by(username=username).first() return team,"
] |
[
"app = Flask(__name__) @app.route('/',methods=['GET','POST']) def website(): if request.method==\"GET\": return render_template('index.html') else: return render_template('index.html')",
"from flask import Flask, render_template, request, redirect, session app = Flask(__name__) @app.route('/',methods=['GET','POST']) def",
"= Flask(__name__) @app.route('/',methods=['GET','POST']) def website(): if request.method==\"GET\": return render_template('index.html') else: return render_template('index.html') if",
"<reponame>85599/power-napp<filename>website.py from flask import Flask, render_template, request, redirect, session app = Flask(__name__) @app.route('/',methods=['GET','POST'])",
"render_template, request, redirect, session app = Flask(__name__) @app.route('/',methods=['GET','POST']) def website(): if request.method==\"GET\": return",
"website(): if request.method==\"GET\": return render_template('index.html') else: return render_template('index.html') if __name__ == '__main__': app.run(debug=True)",
"import Flask, render_template, request, redirect, session app = Flask(__name__) @app.route('/',methods=['GET','POST']) def website(): if",
"flask import Flask, render_template, request, redirect, session app = Flask(__name__) @app.route('/',methods=['GET','POST']) def website():",
"session app = Flask(__name__) @app.route('/',methods=['GET','POST']) def website(): if request.method==\"GET\": return render_template('index.html') else: return",
"redirect, session app = Flask(__name__) @app.route('/',methods=['GET','POST']) def website(): if request.method==\"GET\": return render_template('index.html') else:",
"Flask(__name__) @app.route('/',methods=['GET','POST']) def website(): if request.method==\"GET\": return render_template('index.html') else: return render_template('index.html') if __name__",
"@app.route('/',methods=['GET','POST']) def website(): if request.method==\"GET\": return render_template('index.html') else: return render_template('index.html') if __name__ ==",
"request, redirect, session app = Flask(__name__) @app.route('/',methods=['GET','POST']) def website(): if request.method==\"GET\": return render_template('index.html')",
"Flask, render_template, request, redirect, session app = Flask(__name__) @app.route('/',methods=['GET','POST']) def website(): if request.method==\"GET\":",
"def website(): if request.method==\"GET\": return render_template('index.html') else: return render_template('index.html') if __name__ == '__main__':"
] |
[] |
[
"{ 'capital':'paris', 'population':66.03 }, 'germany': { 'capital':'berlin', 'population':80.62 }, 'norway': { 'capital':'oslo', 'population':5.084",
"}, 'france': { 'capital':'paris', 'population':66.03 }, 'germany': { 'capital':'berlin', 'population':80.62 }, 'norway': {",
"'france': { 'capital':'paris', 'population':66.03 }, 'germany': { 'capital':'berlin', 'population':80.62 }, 'norway': { 'capital':'oslo',",
"'population':46.77 }, 'france': { 'capital':'paris', 'population':66.03 }, 'germany': { 'capital':'berlin', 'population':80.62 }, 'norway':",
"'population':66.03 }, 'germany': { 'capital':'berlin', 'population':80.62 }, 'norway': { 'capital':'oslo', 'population':5.084 } }",
"{ 'capital':'berlin', 'population':80.62 }, 'norway': { 'capital':'oslo', 'population':5.084 } } print(europe['france']['population']) data={'cpaital':'Islamabad', 'population",
"'germany': { 'capital':'berlin', 'population':80.62 }, 'norway': { 'capital':'oslo', 'population':5.084 } } print(europe['france']['population']) data={'cpaital':'Islamabad',",
"camp/Dictionarty_part_2.py europe = { 'spain': { 'capital':'madrid', 'population':46.77 }, 'france': { 'capital':'paris', 'population':66.03",
"}, 'germany': { 'capital':'berlin', 'population':80.62 }, 'norway': { 'capital':'oslo', 'population':5.084 } } print(europe['france']['population'])",
"europe = { 'spain': { 'capital':'madrid', 'population':46.77 }, 'france': { 'capital':'paris', 'population':66.03 },",
"'capital':'madrid', 'population':46.77 }, 'france': { 'capital':'paris', 'population':66.03 }, 'germany': { 'capital':'berlin', 'population':80.62 },",
"{ 'capital':'madrid', 'population':46.77 }, 'france': { 'capital':'paris', 'population':66.03 }, 'germany': { 'capital':'berlin', 'population':80.62",
"<reponame>hamzashabbir11/dataStructures<filename>data camp/Dictionarty_part_2.py europe = { 'spain': { 'capital':'madrid', 'population':46.77 }, 'france': { 'capital':'paris',",
"'capital':'berlin', 'population':80.62 }, 'norway': { 'capital':'oslo', 'population':5.084 } } print(europe['france']['population']) data={'cpaital':'Islamabad', 'population ':",
"{ 'spain': { 'capital':'madrid', 'population':46.77 }, 'france': { 'capital':'paris', 'population':66.03 }, 'germany': {",
"'population':80.62 }, 'norway': { 'capital':'oslo', 'population':5.084 } } print(europe['france']['population']) data={'cpaital':'Islamabad', 'population ': 220}",
"'norway': { 'capital':'oslo', 'population':5.084 } } print(europe['france']['population']) data={'cpaital':'Islamabad', 'population ': 220} europe['Pakistan']=data print(europe)",
"}, 'norway': { 'capital':'oslo', 'population':5.084 } } print(europe['france']['population']) data={'cpaital':'Islamabad', 'population ': 220} europe['Pakistan']=data",
"'capital':'paris', 'population':66.03 }, 'germany': { 'capital':'berlin', 'population':80.62 }, 'norway': { 'capital':'oslo', 'population':5.084 }",
"'spain': { 'capital':'madrid', 'population':46.77 }, 'france': { 'capital':'paris', 'population':66.03 }, 'germany': { 'capital':'berlin',",
"= { 'spain': { 'capital':'madrid', 'population':46.77 }, 'france': { 'capital':'paris', 'population':66.03 }, 'germany':"
] |
[
"uses ``PsAuxcww`` parser to determine OpenStack Compute node. It checks if 'nova-compute' process",
"\"\"\" def __init__(self, ps): if 'nova-compute' not in ps.running: raise SkipComponent('Not OpenStack Compute",
"component from insights.parsers.ps import PsAuxcww from insights.core.dr import SkipComponent @component(PsAuxcww) class IsOpenStackCompute(object): \"\"\"The",
"``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack Compute node. It checks if",
"parser to determine OpenStack Compute node. It checks if 'nova-compute' process exist, if",
"component will not fire. Can be added as a dependency of a parser",
"import PsAuxcww from insights.core.dr import SkipComponent @component(PsAuxcww) class IsOpenStackCompute(object): \"\"\"The ``IsOpenStackCompute`` component uses",
"insights.parsers.ps import PsAuxcww from insights.core.dr import SkipComponent @component(PsAuxcww) class IsOpenStackCompute(object): \"\"\"The ``IsOpenStackCompute`` component",
"``PsAuxcww`` parser to determine OpenStack Compute node. It checks if ``nova-compute`` process exist,",
"Compute node. It checks if 'nova-compute' process exist, if not raises ``SkipComponent`` so",
"``SkipComponent``. Raises: SkipComponent: When ``nova-compute`` process does not exist. \"\"\" def __init__(self, ps):",
"'nova-compute' process exist, if not raises ``SkipComponent`` so that the dependent component will",
"that the parser only fires if the ``IsIsOpenStackCompute`` dependency is met. \"\"\" from",
"to determine OpenStack Compute node. It checks if 'nova-compute' process exist, if not",
"is met. \"\"\" from insights.core.plugins import component from insights.parsers.ps import PsAuxcww from insights.core.dr",
"node. It checks if ``nova-compute`` process exist, if not raises ``SkipComponent``. Raises: SkipComponent:",
"checks if ``nova-compute`` process exist, if not raises ``SkipComponent``. Raises: SkipComponent: When ``nova-compute``",
"a dependency of a parser so that the parser only fires if the",
"the dependent component will not fire. Can be added as a dependency of",
"``nova-compute`` process exist, if not raises ``SkipComponent``. Raises: SkipComponent: When ``nova-compute`` process does",
"OpenStack Compute node. It checks if 'nova-compute' process exist, if not raises ``SkipComponent``",
"================== The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack Compute node. It",
"process does not exist. \"\"\" def __init__(self, ps): if 'nova-compute' not in ps.running:",
"if not raises ``SkipComponent``. Raises: SkipComponent: When ``nova-compute`` process does not exist. \"\"\"",
"the ``IsIsOpenStackCompute`` dependency is met. \"\"\" from insights.core.plugins import component from insights.parsers.ps import",
"component uses ``PsAuxcww`` parser to determine OpenStack Compute node. It checks if 'nova-compute'",
"exist, if not raises ``SkipComponent``. Raises: SkipComponent: When ``nova-compute`` process does not exist.",
"``SkipComponent`` so that the dependent component will not fire. Can be added as",
"\"\"\" from insights.core.plugins import component from insights.parsers.ps import PsAuxcww from insights.core.dr import SkipComponent",
"raises ``SkipComponent``. Raises: SkipComponent: When ``nova-compute`` process does not exist. \"\"\" def __init__(self,",
"if 'nova-compute' process exist, if not raises ``SkipComponent`` so that the dependent component",
"\"\"\"The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack Compute node. It checks",
"OpenStack Compute node. It checks if ``nova-compute`` process exist, if not raises ``SkipComponent``.",
"dependent component will not fire. Can be added as a dependency of a",
"if not raises ``SkipComponent`` so that the dependent component will not fire. Can",
"import component from insights.parsers.ps import PsAuxcww from insights.core.dr import SkipComponent @component(PsAuxcww) class IsOpenStackCompute(object):",
"dependency of a parser so that the parser only fires if the ``IsIsOpenStackCompute``",
"Can be added as a dependency of a parser so that the parser",
"so that the dependent component will not fire. Can be added as a",
"import SkipComponent @component(PsAuxcww) class IsOpenStackCompute(object): \"\"\"The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine",
"only fires if the ``IsIsOpenStackCompute`` dependency is met. \"\"\" from insights.core.plugins import component",
"``IsIsOpenStackCompute`` dependency is met. \"\"\" from insights.core.plugins import component from insights.parsers.ps import PsAuxcww",
"uses ``PsAuxcww`` parser to determine OpenStack Compute node. It checks if ``nova-compute`` process",
"not raises ``SkipComponent`` so that the dependent component will not fire. Can be",
"checks if 'nova-compute' process exist, if not raises ``SkipComponent`` so that the dependent",
"from insights.core.dr import SkipComponent @component(PsAuxcww) class IsOpenStackCompute(object): \"\"\"The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser",
"exist. \"\"\" def __init__(self, ps): if 'nova-compute' not in ps.running: raise SkipComponent('Not OpenStack",
"It checks if ``nova-compute`` process exist, if not raises ``SkipComponent``. Raises: SkipComponent: When",
"the parser only fires if the ``IsIsOpenStackCompute`` dependency is met. \"\"\" from insights.core.plugins",
"that the dependent component will not fire. Can be added as a dependency",
"of a parser so that the parser only fires if the ``IsIsOpenStackCompute`` dependency",
"insights.core.plugins import component from insights.parsers.ps import PsAuxcww from insights.core.dr import SkipComponent @component(PsAuxcww) class",
"not raises ``SkipComponent``. Raises: SkipComponent: When ``nova-compute`` process does not exist. \"\"\" def",
"be added as a dependency of a parser so that the parser only",
"Raises: SkipComponent: When ``nova-compute`` process does not exist. \"\"\" def __init__(self, ps): if",
"determine OpenStack Compute node. It checks if ``nova-compute`` process exist, if not raises",
"``PsAuxcww`` parser to determine OpenStack Compute node. It checks if 'nova-compute' process exist,",
"insights.core.dr import SkipComponent @component(PsAuxcww) class IsOpenStackCompute(object): \"\"\"The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to",
"SkipComponent @component(PsAuxcww) class IsOpenStackCompute(object): \"\"\"The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack",
"@component(PsAuxcww) class IsOpenStackCompute(object): \"\"\"The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack Compute",
"The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack Compute node. It checks",
"to determine OpenStack Compute node. It checks if ``nova-compute`` process exist, if not",
"It checks if 'nova-compute' process exist, if not raises ``SkipComponent`` so that the",
"added as a dependency of a parser so that the parser only fires",
"not exist. \"\"\" def __init__(self, ps): if 'nova-compute' not in ps.running: raise SkipComponent('Not",
"determine OpenStack Compute node. It checks if 'nova-compute' process exist, if not raises",
"met. \"\"\" from insights.core.plugins import component from insights.parsers.ps import PsAuxcww from insights.core.dr import",
"raises ``SkipComponent`` so that the dependent component will not fire. Can be added",
"class IsOpenStackCompute(object): \"\"\"The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack Compute node.",
"from insights.parsers.ps import PsAuxcww from insights.core.dr import SkipComponent @component(PsAuxcww) class IsOpenStackCompute(object): \"\"\"The ``IsOpenStackCompute``",
"``nova-compute`` process does not exist. \"\"\" def __init__(self, ps): if 'nova-compute' not in",
"so that the parser only fires if the ``IsIsOpenStackCompute`` dependency is met. \"\"\"",
"if ``nova-compute`` process exist, if not raises ``SkipComponent``. Raises: SkipComponent: When ``nova-compute`` process",
"dependency is met. \"\"\" from insights.core.plugins import component from insights.parsers.ps import PsAuxcww from",
"process exist, if not raises ``SkipComponent`` so that the dependent component will not",
"exist, if not raises ``SkipComponent`` so that the dependent component will not fire.",
"a parser so that the parser only fires if the ``IsIsOpenStackCompute`` dependency is",
"def __init__(self, ps): if 'nova-compute' not in ps.running: raise SkipComponent('Not OpenStack Compute node')",
"Compute node. It checks if ``nova-compute`` process exist, if not raises ``SkipComponent``. Raises:",
"does not exist. \"\"\" def __init__(self, ps): if 'nova-compute' not in ps.running: raise",
"parser to determine OpenStack Compute node. It checks if ``nova-compute`` process exist, if",
"node. It checks if 'nova-compute' process exist, if not raises ``SkipComponent`` so that",
"if the ``IsIsOpenStackCompute`` dependency is met. \"\"\" from insights.core.plugins import component from insights.parsers.ps",
"IsOpenStackCompute ================== The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack Compute node.",
"not fire. Can be added as a dependency of a parser so that",
"\"\"\" IsOpenStackCompute ================== The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack Compute",
"fire. Can be added as a dependency of a parser so that the",
"as a dependency of a parser so that the parser only fires if",
"SkipComponent: When ``nova-compute`` process does not exist. \"\"\" def __init__(self, ps): if 'nova-compute'",
"component uses ``PsAuxcww`` parser to determine OpenStack Compute node. It checks if ``nova-compute``",
"When ``nova-compute`` process does not exist. \"\"\" def __init__(self, ps): if 'nova-compute' not",
"PsAuxcww from insights.core.dr import SkipComponent @component(PsAuxcww) class IsOpenStackCompute(object): \"\"\"The ``IsOpenStackCompute`` component uses ``PsAuxcww``",
"parser only fires if the ``IsIsOpenStackCompute`` dependency is met. \"\"\" from insights.core.plugins import",
"from insights.core.plugins import component from insights.parsers.ps import PsAuxcww from insights.core.dr import SkipComponent @component(PsAuxcww)",
"process exist, if not raises ``SkipComponent``. Raises: SkipComponent: When ``nova-compute`` process does not",
"IsOpenStackCompute(object): \"\"\"The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack Compute node. It",
"fires if the ``IsIsOpenStackCompute`` dependency is met. \"\"\" from insights.core.plugins import component from",
"parser so that the parser only fires if the ``IsIsOpenStackCompute`` dependency is met.",
"will not fire. Can be added as a dependency of a parser so"
] |
[
"import unittest import json import os from APIInterface.LDSAPI import SourceAPI from .TestFileReader import",
"Grass 12 server\", \"grass\", \"Proxy (CNTLM) - Auth (aio)\", [], 3, \"erin\", \"erinspassword\",",
"(CNTLM) - No Auth\", [], 3, \"bob\", \"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"), (\"Carol's PostGIS 8",
"from APIInterface.LDSAPI import SourceAPI from .TestFileReader import FileReader from .TestSuper import APITestCase sources",
"17/12/2013 @author: jramsay ''' #https://koordinates.com/services/api/v1/sources/1/ import unittest import json import os from APIInterface.LDSAPI",
"No Auth\", [], 3, \"carol\", \"carols password\", \"https://carol.example.com/PostGis/rest/services\", \"@weekly\"), (\"Dan's SpatiaLite 4 Server\",",
"[], 3, \"dan\", \"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"), (\"Erin's Grass 12 server\", \"grass\", \"Proxy (CNTLM)",
"\"filegdb\", \"Proxy (Corp Web) - Auth (aio)\", [], 3, \"frank\", \"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"),",
"json.dumps(self.api.res) pp = json.dumps(self.api.res, sort_keys=True, indent=4, separators=(',', ': ')) print(be,pp) if __name__ ==",
"from .TestFileReader import FileReader from .TestSuper import APITestCase sources = ( (\"Alices' Mapinfo",
"<reponame>josephramsay/LDSAPI ''' Created on 17/12/2013 @author: jramsay ''' #https://koordinates.com/services/api/v1/sources/1/ import unittest import json",
"import FileReader from .TestSuper import APITestCase sources = ( (\"Alices' Mapinfo Server\", \"mapinfo\",",
"\"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\") ) class SourcesTester(APITestCase): def setUp(self): print('S') self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams()",
"\"Proxy (CNTLM) - No Auth\", [], 3, \"bob\", \"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"), (\"Carol's PostGIS",
"(CNTLM) - Auth (aio)\", [], 3, \"erin\", \"erinspassword\", \"https://erin.example.com/Grass/rest/services\", \"@quarterly\"), (\"Frank's FileGDB 2",
"test_21_GetNoProxyAuth(self): self.api.connect() self.api.dispRes(self.api.res) def test_30_BasicJSONWrite(self): self.api.connect() be = json.dumps(self.api.res) pp = json.dumps(self.api.res, sort_keys=True,",
"( (\"Alices' Mapinfo Server\", \"mapinfo\", \"No Proxy - No Auth\", [], 3, \"alice\",",
"import json import os from APIInterface.LDSAPI import SourceAPI from .TestFileReader import FileReader from",
"Server\", \"arcgis\", \"Proxy (CNTLM) - No Auth\", [], 3, \"bob\", \"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"),",
"SERVER\", \"postgis\", \"Proxy (Corp Web) - Auth (aio)\", [], 3, \"pguser\", \"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\",",
"jramsay ''' #https://koordinates.com/services/api/v1/sources/1/ import unittest import json import os from APIInterface.LDSAPI import SourceAPI",
"(Corp Web) - No Auth\", [], 3, \"carol\", \"carols password\", \"https://carol.example.com/PostGis/rest/services\", \"@weekly\"), (\"Dan's",
"\"Proxy (Corp Web) - Auth (aio)\", [], 3, \"frank\", \"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"), (\"WORKING",
"\"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"), (\"Carol's PostGIS 8 Server\", \"postgis\", \"Proxy (Corp Web) - No",
"(\"Frank's FileGDB 2 Server\", \"filegdb\", \"Proxy (Corp Web) - Auth (aio)\", [], 3,",
"\"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\") ) class SourcesTester(APITestCase): def setUp(self): print('S') self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams() def",
"3, \"frank\", \"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"), (\"WORKING PG SERVER\", \"postgis\", \"Proxy (Corp Web) -",
"\"Proxy (Corp Web) - Auth (aio)\", [], 3, \"pguser\", \"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\") )",
"\"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"), (\"Bob's ArcGIS 10 Server\", \"arcgis\", \"Proxy (CNTLM) - No Auth\", [],",
"SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams() def tearDown(self): self.api = None def test_21_GetNoProxyAuth(self): self.api.connect() self.api.dispRes(self.api.res) def test_30_BasicJSONWrite(self):",
"\"postgis\", \"Proxy (Corp Web) - No Auth\", [], 3, \"carol\", \"carols password\", \"https://carol.example.com/PostGis/rest/services\",",
"\"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"), (\"Erin's Grass 12 server\", \"grass\", \"Proxy (CNTLM) - Auth (aio)\", [],",
"Server\", \"mapinfo\", \"No Proxy - No Auth\", [], 3, \"alice\", \"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"),",
"Server\", \"spatialite\", \"No Proxy - Auth (aio)\", [], 3, \"dan\", \"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"),",
"setUp(self): print('S') self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams() def tearDown(self): self.api = None def test_21_GetNoProxyAuth(self):",
"\"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"), (\"Carol's PostGIS 8 Server\", \"postgis\", \"Proxy (Corp Web) - No Auth\",",
"(\"Bob's ArcGIS 10 Server\", \"arcgis\", \"Proxy (CNTLM) - No Auth\", [], 3, \"bob\",",
") class SourcesTester(APITestCase): def setUp(self): print('S') self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams() def tearDown(self): self.api",
"self.api.setParams() def tearDown(self): self.api = None def test_21_GetNoProxyAuth(self): self.api.connect() self.api.dispRes(self.api.res) def test_30_BasicJSONWrite(self): self.api.connect()",
"\"@hourly\"), (\"Bob's ArcGIS 10 Server\", \"arcgis\", \"Proxy (CNTLM) - No Auth\", [], 3,",
"''' Created on 17/12/2013 @author: jramsay ''' #https://koordinates.com/services/api/v1/sources/1/ import unittest import json import",
"(\"Dan's SpatiaLite 4 Server\", \"spatialite\", \"No Proxy - Auth (aio)\", [], 3, \"dan\",",
"\"@yearly\"), (\"Erin's Grass 12 server\", \"grass\", \"Proxy (CNTLM) - Auth (aio)\", [], 3,",
"\"Proxy (CNTLM) - Auth (aio)\", [], 3, \"erin\", \"erinspassword\", \"https://erin.example.com/Grass/rest/services\", \"@quarterly\"), (\"Frank's FileGDB",
"\"bob\", \"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"), (\"Carol's PostGIS 8 Server\", \"postgis\", \"Proxy (Corp Web) -",
"def tearDown(self): self.api = None def test_21_GetNoProxyAuth(self): self.api.connect() self.api.dispRes(self.api.res) def test_30_BasicJSONWrite(self): self.api.connect() be",
"- Auth (aio)\", [], 3, \"frank\", \"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"), (\"WORKING PG SERVER\", \"postgis\",",
"\"frank\", \"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"), (\"WORKING PG SERVER\", \"postgis\", \"Proxy (Corp Web) - Auth",
"- No Auth\", [], 3, \"bob\", \"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"), (\"Carol's PostGIS 8 Server\",",
"\"carols password\", \"https://carol.example.com/PostGis/rest/services\", \"@weekly\"), (\"Dan's SpatiaLite 4 Server\", \"spatialite\", \"No Proxy - Auth",
"[], 3, \"pguser\", \"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\") ) class SourcesTester(APITestCase): def setUp(self): print('S') self.api",
"= None def test_21_GetNoProxyAuth(self): self.api.connect() self.api.dispRes(self.api.res) def test_30_BasicJSONWrite(self): self.api.connect() be = json.dumps(self.api.res) pp",
"Auth\", [], 3, \"bob\", \"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"), (\"Carol's PostGIS 8 Server\", \"postgis\", \"Proxy",
"APITestCase sources = ( (\"Alices' Mapinfo Server\", \"mapinfo\", \"No Proxy - No Auth\",",
"\"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"), (\"WORKING PG SERVER\", \"postgis\", \"Proxy (Corp Web) - Auth (aio)\",",
"SourceAPI from .TestFileReader import FileReader from .TestSuper import APITestCase sources = ( (\"Alices'",
"Server\", \"postgis\", \"Proxy (Corp Web) - No Auth\", [], 3, \"carol\", \"carols password\",",
"(\"Erin's Grass 12 server\", \"grass\", \"Proxy (CNTLM) - Auth (aio)\", [], 3, \"erin\",",
"[], 3, \"erin\", \"erinspassword\", \"https://erin.example.com/Grass/rest/services\", \"@quarterly\"), (\"Frank's FileGDB 2 Server\", \"filegdb\", \"Proxy (Corp",
"\"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"), (\"WORKING PG SERVER\", \"postgis\", \"Proxy (Corp Web) - Auth (aio)\", [],",
"be = json.dumps(self.api.res) pp = json.dumps(self.api.res, sort_keys=True, indent=4, separators=(',', ': ')) print(be,pp) if",
"Proxy - Auth (aio)\", [], 3, \"dan\", \"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"), (\"Erin's Grass 12",
"\"@weekly\"), (\"Dan's SpatiaLite 4 Server\", \"spatialite\", \"No Proxy - Auth (aio)\", [], 3,",
"import SourceAPI from .TestFileReader import FileReader from .TestSuper import APITestCase sources = (",
"PG SERVER\", \"postgis\", \"Proxy (Corp Web) - Auth (aio)\", [], 3, \"pguser\", \"pgpass\",",
"(\"Carol's PostGIS 8 Server\", \"postgis\", \"Proxy (Corp Web) - No Auth\", [], 3,",
"def test_30_BasicJSONWrite(self): self.api.connect() be = json.dumps(self.api.res) pp = json.dumps(self.api.res, sort_keys=True, indent=4, separators=(',', ':",
"(aio)\", [], 3, \"erin\", \"erinspassword\", \"https://erin.example.com/Grass/rest/services\", \"@quarterly\"), (\"Frank's FileGDB 2 Server\", \"filegdb\", \"Proxy",
"\"arcgis\", \"Proxy (CNTLM) - No Auth\", [], 3, \"bob\", \"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"), (\"Carol's",
"3, \"carol\", \"carols password\", \"https://carol.example.com/PostGis/rest/services\", \"@weekly\"), (\"Dan's SpatiaLite 4 Server\", \"spatialite\", \"No Proxy",
"self.api.connect() self.api.dispRes(self.api.res) def test_30_BasicJSONWrite(self): self.api.connect() be = json.dumps(self.api.res) pp = json.dumps(self.api.res, sort_keys=True, indent=4,",
"\"mapinfo\", \"No Proxy - No Auth\", [], 3, \"alice\", \"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"), (\"Bob's",
"test_30_BasicJSONWrite(self): self.api.connect() be = json.dumps(self.api.res) pp = json.dumps(self.api.res, sort_keys=True, indent=4, separators=(',', ': '))",
"= json.dumps(self.api.res) pp = json.dumps(self.api.res, sort_keys=True, indent=4, separators=(',', ': ')) print(be,pp) if __name__",
"Proxy - No Auth\", [], 3, \"alice\", \"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"), (\"Bob's ArcGIS 10",
"server\", \"grass\", \"Proxy (CNTLM) - Auth (aio)\", [], 3, \"erin\", \"erinspassword\", \"https://erin.example.com/Grass/rest/services\", \"@quarterly\"),",
"self.api = None def test_21_GetNoProxyAuth(self): self.api.connect() self.api.dispRes(self.api.res) def test_30_BasicJSONWrite(self): self.api.connect() be = json.dumps(self.api.res)",
"os from APIInterface.LDSAPI import SourceAPI from .TestFileReader import FileReader from .TestSuper import APITestCase",
"(aio)\", [], 3, \"dan\", \"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"), (\"Erin's Grass 12 server\", \"grass\", \"Proxy",
"Mapinfo Server\", \"mapinfo\", \"No Proxy - No Auth\", [], 3, \"alice\", \"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\",",
"on 17/12/2013 @author: jramsay ''' #https://koordinates.com/services/api/v1/sources/1/ import unittest import json import os from",
".TestFileReader import FileReader from .TestSuper import APITestCase sources = ( (\"Alices' Mapinfo Server\",",
"self.api.connect() be = json.dumps(self.api.res) pp = json.dumps(self.api.res, sort_keys=True, indent=4, separators=(',', ': ')) print(be,pp)",
"10 Server\", \"arcgis\", \"Proxy (CNTLM) - No Auth\", [], 3, \"bob\", \"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\",",
"\"spatialite\", \"No Proxy - Auth (aio)\", [], 3, \"dan\", \"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"), (\"Erin's",
"No Auth\", [], 3, \"bob\", \"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"), (\"Carol's PostGIS 8 Server\", \"postgis\",",
"\"https://carol.example.com/PostGis/rest/services\", \"@weekly\"), (\"Dan's SpatiaLite 4 Server\", \"spatialite\", \"No Proxy - Auth (aio)\", [],",
"''' #https://koordinates.com/services/api/v1/sources/1/ import unittest import json import os from APIInterface.LDSAPI import SourceAPI from",
"import os from APIInterface.LDSAPI import SourceAPI from .TestFileReader import FileReader from .TestSuper import",
"= SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams() def tearDown(self): self.api = None def test_21_GetNoProxyAuth(self): self.api.connect() self.api.dispRes(self.api.res) def",
"(Corp Web) - Auth (aio)\", [], 3, \"pguser\", \"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\") ) class",
"\"carol\", \"carols password\", \"https://carol.example.com/PostGis/rest/services\", \"@weekly\"), (\"Dan's SpatiaLite 4 Server\", \"spatialite\", \"No Proxy -",
"= json.dumps(self.api.res, sort_keys=True, indent=4, separators=(',', ': ')) print(be,pp) if __name__ == '__main__': unittest.main()",
"unittest import json import os from APIInterface.LDSAPI import SourceAPI from .TestFileReader import FileReader",
"\"pguser\", \"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\") ) class SourcesTester(APITestCase): def setUp(self): print('S') self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile)",
"- Auth (aio)\", [], 3, \"dan\", \"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"), (\"Erin's Grass 12 server\",",
"\"erinspassword\", \"https://erin.example.com/Grass/rest/services\", \"@quarterly\"), (\"Frank's FileGDB 2 Server\", \"filegdb\", \"Proxy (Corp Web) - Auth",
"\"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"), (\"Erin's Grass 12 server\", \"grass\", \"Proxy (CNTLM) - Auth (aio)\",",
"\"@daily\") ) class SourcesTester(APITestCase): def setUp(self): print('S') self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams() def tearDown(self):",
"3, \"erin\", \"erinspassword\", \"https://erin.example.com/Grass/rest/services\", \"@quarterly\"), (\"Frank's FileGDB 2 Server\", \"filegdb\", \"Proxy (Corp Web)",
"Auth\", [], 3, \"carol\", \"carols password\", \"https://carol.example.com/PostGis/rest/services\", \"@weekly\"), (\"Dan's SpatiaLite 4 Server\", \"spatialite\",",
"[], 3, \"alice\", \"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"), (\"Bob's ArcGIS 10 Server\", \"arcgis\", \"Proxy (CNTLM)",
"\"Proxy (Corp Web) - No Auth\", [], 3, \"carol\", \"carols password\", \"https://carol.example.com/PostGis/rest/services\", \"@weekly\"),",
"3, \"bob\", \"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"), (\"Carol's PostGIS 8 Server\", \"postgis\", \"Proxy (Corp Web)",
"Auth (aio)\", [], 3, \"pguser\", \"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\") ) class SourcesTester(APITestCase): def setUp(self):",
"\"@quarterly\"), (\"Frank's FileGDB 2 Server\", \"filegdb\", \"Proxy (Corp Web) - Auth (aio)\", [],",
"[], 3, \"bob\", \"bobspassword\", \"https://bob.example.com/ArcGIS/rest/services\", \"@daily\"), (\"Carol's PostGIS 8 Server\", \"postgis\", \"Proxy (Corp",
"(aio)\", [], 3, \"pguser\", \"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\") ) class SourcesTester(APITestCase): def setUp(self): print('S')",
"(aio)\", [], 3, \"frank\", \"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"), (\"WORKING PG SERVER\", \"postgis\", \"Proxy (Corp",
"\"@daily\"), (\"Carol's PostGIS 8 Server\", \"postgis\", \"Proxy (Corp Web) - No Auth\", [],",
"\"No Proxy - No Auth\", [], 3, \"alice\", \"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"), (\"Bob's ArcGIS",
"3, \"pguser\", \"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\") ) class SourcesTester(APITestCase): def setUp(self): print('S') self.api =",
"PostGIS 8 Server\", \"postgis\", \"Proxy (Corp Web) - No Auth\", [], 3, \"carol\",",
"SourcesTester(APITestCase): def setUp(self): print('S') self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams() def tearDown(self): self.api = None",
"sources = ( (\"Alices' Mapinfo Server\", \"mapinfo\", \"No Proxy - No Auth\", [],",
"Web) - Auth (aio)\", [], 3, \"frank\", \"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"), (\"WORKING PG SERVER\",",
"self.api.dispRes(self.api.res) def test_30_BasicJSONWrite(self): self.api.connect() be = json.dumps(self.api.res) pp = json.dumps(self.api.res, sort_keys=True, indent=4, separators=(',',",
"= ( (\"Alices' Mapinfo Server\", \"mapinfo\", \"No Proxy - No Auth\", [], 3,",
"\"erin\", \"erinspassword\", \"https://erin.example.com/Grass/rest/services\", \"@quarterly\"), (\"Frank's FileGDB 2 Server\", \"filegdb\", \"Proxy (Corp Web) -",
"Created on 17/12/2013 @author: jramsay ''' #https://koordinates.com/services/api/v1/sources/1/ import unittest import json import os",
"8 Server\", \"postgis\", \"Proxy (Corp Web) - No Auth\", [], 3, \"carol\", \"carols",
"\"@occasionally\"), (\"WORKING PG SERVER\", \"postgis\", \"Proxy (Corp Web) - Auth (aio)\", [], 3,",
"2 Server\", \"filegdb\", \"Proxy (Corp Web) - Auth (aio)\", [], 3, \"frank\", \"frankspassword\",",
"3, \"alice\", \"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"), (\"Bob's ArcGIS 10 Server\", \"arcgis\", \"Proxy (CNTLM) -",
"def test_21_GetNoProxyAuth(self): self.api.connect() self.api.dispRes(self.api.res) def test_30_BasicJSONWrite(self): self.api.connect() be = json.dumps(self.api.res) pp = json.dumps(self.api.res,",
"\"alice\", \"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"), (\"Bob's ArcGIS 10 Server\", \"arcgis\", \"Proxy (CNTLM) - No",
"\"postgis\", \"Proxy (Corp Web) - Auth (aio)\", [], 3, \"pguser\", \"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\")",
"FileReader from .TestSuper import APITestCase sources = ( (\"Alices' Mapinfo Server\", \"mapinfo\", \"No",
"(Corp Web) - Auth (aio)\", [], 3, \"frank\", \"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"), (\"WORKING PG",
"Web) - Auth (aio)\", [], 3, \"pguser\", \"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\") ) class SourcesTester(APITestCase):",
"Web) - No Auth\", [], 3, \"carol\", \"carols password\", \"https://carol.example.com/PostGis/rest/services\", \"@weekly\"), (\"Dan's SpatiaLite",
"tearDown(self): self.api = None def test_21_GetNoProxyAuth(self): self.api.connect() self.api.dispRes(self.api.res) def test_30_BasicJSONWrite(self): self.api.connect() be =",
"@author: jramsay ''' #https://koordinates.com/services/api/v1/sources/1/ import unittest import json import os from APIInterface.LDSAPI import",
"\"grass\", \"Proxy (CNTLM) - Auth (aio)\", [], 3, \"erin\", \"erinspassword\", \"https://erin.example.com/Grass/rest/services\", \"@quarterly\"), (\"Frank's",
"from .TestSuper import APITestCase sources = ( (\"Alices' Mapinfo Server\", \"mapinfo\", \"No Proxy",
"- Auth (aio)\", [], 3, \"pguser\", \"pgpass\", \"https://linz.govt.nz/PostGIS/rest/services\", \"@daily\") ) class SourcesTester(APITestCase): def",
"None def test_21_GetNoProxyAuth(self): self.api.connect() self.api.dispRes(self.api.res) def test_30_BasicJSONWrite(self): self.api.connect() be = json.dumps(self.api.res) pp =",
"SpatiaLite 4 Server\", \"spatialite\", \"No Proxy - Auth (aio)\", [], 3, \"dan\", \"danspassword\",",
"def setUp(self): print('S') self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams() def tearDown(self): self.api = None def",
"pp = json.dumps(self.api.res, sort_keys=True, indent=4, separators=(',', ': ')) print(be,pp) if __name__ == '__main__':",
"class SourcesTester(APITestCase): def setUp(self): print('S') self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams() def tearDown(self): self.api =",
"- No Auth\", [], 3, \"alice\", \"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"), (\"Bob's ArcGIS 10 Server\",",
"APIInterface.LDSAPI import SourceAPI from .TestFileReader import FileReader from .TestSuper import APITestCase sources =",
"#https://koordinates.com/services/api/v1/sources/1/ import unittest import json import os from APIInterface.LDSAPI import SourceAPI from .TestFileReader",
"ArcGIS 10 Server\", \"arcgis\", \"Proxy (CNTLM) - No Auth\", [], 3, \"bob\", \"bobspassword\",",
"self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams() def tearDown(self): self.api = None def test_21_GetNoProxyAuth(self): self.api.connect() self.api.dispRes(self.api.res)",
".TestSuper import APITestCase sources = ( (\"Alices' Mapinfo Server\", \"mapinfo\", \"No Proxy -",
"print('S') self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile) self.api.setParams() def tearDown(self): self.api = None def test_21_GetNoProxyAuth(self): self.api.connect()",
"Server\", \"filegdb\", \"Proxy (Corp Web) - Auth (aio)\", [], 3, \"frank\", \"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\",",
"No Auth\", [], 3, \"alice\", \"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"), (\"Bob's ArcGIS 10 Server\", \"arcgis\",",
"[], 3, \"frank\", \"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"), (\"WORKING PG SERVER\", \"postgis\", \"Proxy (Corp Web)",
"\"https://erin.example.com/Grass/rest/services\", \"@quarterly\"), (\"Frank's FileGDB 2 Server\", \"filegdb\", \"Proxy (Corp Web) - Auth (aio)\",",
"- No Auth\", [], 3, \"carol\", \"carols password\", \"https://carol.example.com/PostGis/rest/services\", \"@weekly\"), (\"Dan's SpatiaLite 4",
"[], 3, \"carol\", \"carols password\", \"https://carol.example.com/PostGis/rest/services\", \"@weekly\"), (\"Dan's SpatiaLite 4 Server\", \"spatialite\", \"No",
"Auth (aio)\", [], 3, \"erin\", \"erinspassword\", \"https://erin.example.com/Grass/rest/services\", \"@quarterly\"), (\"Frank's FileGDB 2 Server\", \"filegdb\",",
"Auth (aio)\", [], 3, \"frank\", \"frankspassword\", \"https://frank.example.com/FileGDB/rest/services\", \"@occasionally\"), (\"WORKING PG SERVER\", \"postgis\", \"Proxy",
"\"No Proxy - Auth (aio)\", [], 3, \"dan\", \"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"), (\"Erin's Grass",
"\"dan\", \"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"), (\"Erin's Grass 12 server\", \"grass\", \"Proxy (CNTLM) - Auth",
"(\"Alices' Mapinfo Server\", \"mapinfo\", \"No Proxy - No Auth\", [], 3, \"alice\", \"alicespassword\",",
"password\", \"https://carol.example.com/PostGis/rest/services\", \"@weekly\"), (\"Dan's SpatiaLite 4 Server\", \"spatialite\", \"No Proxy - Auth (aio)\",",
"4 Server\", \"spatialite\", \"No Proxy - Auth (aio)\", [], 3, \"dan\", \"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\",",
"import APITestCase sources = ( (\"Alices' Mapinfo Server\", \"mapinfo\", \"No Proxy - No",
"- Auth (aio)\", [], 3, \"erin\", \"erinspassword\", \"https://erin.example.com/Grass/rest/services\", \"@quarterly\"), (\"Frank's FileGDB 2 Server\",",
"12 server\", \"grass\", \"Proxy (CNTLM) - Auth (aio)\", [], 3, \"erin\", \"erinspassword\", \"https://erin.example.com/Grass/rest/services\",",
"3, \"dan\", \"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"), (\"Erin's Grass 12 server\", \"grass\", \"Proxy (CNTLM) -",
"json import os from APIInterface.LDSAPI import SourceAPI from .TestFileReader import FileReader from .TestSuper",
"\"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"), (\"Bob's ArcGIS 10 Server\", \"arcgis\", \"Proxy (CNTLM) - No Auth\",",
"(\"WORKING PG SERVER\", \"postgis\", \"Proxy (Corp Web) - Auth (aio)\", [], 3, \"pguser\",",
"FileGDB 2 Server\", \"filegdb\", \"Proxy (Corp Web) - Auth (aio)\", [], 3, \"frank\",",
"Auth\", [], 3, \"alice\", \"alicespassword\", \"https://alice.example.com/Mapinfo/rest/services\", \"@hourly\"), (\"Bob's ArcGIS 10 Server\", \"arcgis\", \"Proxy",
"Auth (aio)\", [], 3, \"dan\", \"danspassword\", \"https://dan.example.com/SpatiaLite/rest/services\", \"@yearly\"), (\"Erin's Grass 12 server\", \"grass\","
] |
[
"defining the properties and assigning None to them def __init__(self, ID=None, salary=0, department=None):",
"an object of the Employee class with default parameters Steve = Employee() Mark",
"department=None): self.ID = ID self.salary = salary self.department = department # creating an",
":\", Steve.salary) print(\"Department :\", Steve.department) print(\"Mark\") print(\"ID :\", Mark.ID) print(\"Salary :\", Mark.salary) print(\"Department",
"ID self.salary = salary self.department = department # creating an object of the",
"\"Human Resources\") # Printing properties of Steve and Mark print(\"Steve\") print(\"ID :\", Steve.ID)",
"Steve and Mark print(\"Steve\") print(\"ID :\", Steve.ID) print(\"Salary :\", Steve.salary) print(\"Department :\", Steve.department)",
":\", Steve.ID) print(\"Salary :\", Steve.salary) print(\"Department :\", Steve.department) print(\"Mark\") print(\"ID :\", Mark.ID) print(\"Salary",
"class Employee: # defining the properties and assigning None to them def __init__(self,",
"class with default parameters Steve = Employee() Mark = Employee(\"3789\", 2500, \"Human Resources\")",
"and Mark print(\"Steve\") print(\"ID :\", Steve.ID) print(\"Salary :\", Steve.salary) print(\"Department :\", Steve.department) print(\"Mark\")",
"self.salary = salary self.department = department # creating an object of the Employee",
"Employee() Mark = Employee(\"3789\", 2500, \"Human Resources\") # Printing properties of Steve and",
"salary self.department = department # creating an object of the Employee class with",
"Printing properties of Steve and Mark print(\"Steve\") print(\"ID :\", Steve.ID) print(\"Salary :\", Steve.salary)",
"self.department = department # creating an object of the Employee class with default",
"default parameters Steve = Employee() Mark = Employee(\"3789\", 2500, \"Human Resources\") # Printing",
"print(\"Department :\", Steve.department) print(\"Mark\") print(\"ID :\", Mark.ID) print(\"Salary :\", Mark.salary) print(\"Department :\", Mark.department)",
"# defining the properties and assigning None to them def __init__(self, ID=None, salary=0,",
"Steve.salary) print(\"Department :\", Steve.department) print(\"Mark\") print(\"ID :\", Mark.ID) print(\"Salary :\", Mark.salary) print(\"Department :\",",
"with default parameters Steve = Employee() Mark = Employee(\"3789\", 2500, \"Human Resources\") #",
"and assigning None to them def __init__(self, ID=None, salary=0, department=None): self.ID = ID",
"salary=0, department=None): self.ID = ID self.salary = salary self.department = department # creating",
"Steve = Employee() Mark = Employee(\"3789\", 2500, \"Human Resources\") # Printing properties of",
"print(\"Salary :\", Steve.salary) print(\"Department :\", Steve.department) print(\"Mark\") print(\"ID :\", Mark.ID) print(\"Salary :\", Mark.salary)",
"= ID self.salary = salary self.department = department # creating an object of",
"Employee class with default parameters Steve = Employee() Mark = Employee(\"3789\", 2500, \"Human",
"__init__(self, ID=None, salary=0, department=None): self.ID = ID self.salary = salary self.department = department",
"object of the Employee class with default parameters Steve = Employee() Mark =",
"Mark print(\"Steve\") print(\"ID :\", Steve.ID) print(\"Salary :\", Steve.salary) print(\"Department :\", Steve.department) print(\"Mark\") print(\"ID",
"Resources\") # Printing properties of Steve and Mark print(\"Steve\") print(\"ID :\", Steve.ID) print(\"Salary",
"Mark = Employee(\"3789\", 2500, \"Human Resources\") # Printing properties of Steve and Mark",
"creating an object of the Employee class with default parameters Steve = Employee()",
"= department # creating an object of the Employee class with default parameters",
"parameters Steve = Employee() Mark = Employee(\"3789\", 2500, \"Human Resources\") # Printing properties",
"of the Employee class with default parameters Steve = Employee() Mark = Employee(\"3789\",",
"assigning None to them def __init__(self, ID=None, salary=0, department=None): self.ID = ID self.salary",
"of Steve and Mark print(\"Steve\") print(\"ID :\", Steve.ID) print(\"Salary :\", Steve.salary) print(\"Department :\",",
"Employee: # defining the properties and assigning None to them def __init__(self, ID=None,",
"properties and assigning None to them def __init__(self, ID=None, salary=0, department=None): self.ID =",
"them def __init__(self, ID=None, salary=0, department=None): self.ID = ID self.salary = salary self.department",
"the properties and assigning None to them def __init__(self, ID=None, salary=0, department=None): self.ID",
"= Employee() Mark = Employee(\"3789\", 2500, \"Human Resources\") # Printing properties of Steve",
"= salary self.department = department # creating an object of the Employee class",
"<filename>02_oop/01_classes_and_objects/03_initializing_with_optional_params.py class Employee: # defining the properties and assigning None to them def",
"print(\"Steve\") print(\"ID :\", Steve.ID) print(\"Salary :\", Steve.salary) print(\"Department :\", Steve.department) print(\"Mark\") print(\"ID :\",",
"Employee(\"3789\", 2500, \"Human Resources\") # Printing properties of Steve and Mark print(\"Steve\") print(\"ID",
"= Employee(\"3789\", 2500, \"Human Resources\") # Printing properties of Steve and Mark print(\"Steve\")",
"properties of Steve and Mark print(\"Steve\") print(\"ID :\", Steve.ID) print(\"Salary :\", Steve.salary) print(\"Department",
"ID=None, salary=0, department=None): self.ID = ID self.salary = salary self.department = department #",
"print(\"ID :\", Steve.ID) print(\"Salary :\", Steve.salary) print(\"Department :\", Steve.department) print(\"Mark\") print(\"ID :\", Mark.ID)",
"the Employee class with default parameters Steve = Employee() Mark = Employee(\"3789\", 2500,",
"self.ID = ID self.salary = salary self.department = department # creating an object",
"department # creating an object of the Employee class with default parameters Steve",
"2500, \"Human Resources\") # Printing properties of Steve and Mark print(\"Steve\") print(\"ID :\",",
"def __init__(self, ID=None, salary=0, department=None): self.ID = ID self.salary = salary self.department =",
"None to them def __init__(self, ID=None, salary=0, department=None): self.ID = ID self.salary =",
"Steve.ID) print(\"Salary :\", Steve.salary) print(\"Department :\", Steve.department) print(\"Mark\") print(\"ID :\", Mark.ID) print(\"Salary :\",",
"# Printing properties of Steve and Mark print(\"Steve\") print(\"ID :\", Steve.ID) print(\"Salary :\",",
"# creating an object of the Employee class with default parameters Steve =",
"to them def __init__(self, ID=None, salary=0, department=None): self.ID = ID self.salary = salary"
] |
[
"import path from . import views \"\"\"Url pattern for dashboard view.\"\"\" urlpatterns =",
"path from . import views \"\"\"Url pattern for dashboard view.\"\"\" urlpatterns = [",
"views \"\"\"Url pattern for dashboard view.\"\"\" urlpatterns = [ path('', views.index, name=\"home\"), ]",
"<gh_stars>0 from django.urls import path from . import views \"\"\"Url pattern for dashboard",
". import views \"\"\"Url pattern for dashboard view.\"\"\" urlpatterns = [ path('', views.index,",
"from django.urls import path from . import views \"\"\"Url pattern for dashboard view.\"\"\"",
"django.urls import path from . import views \"\"\"Url pattern for dashboard view.\"\"\" urlpatterns",
"from . import views \"\"\"Url pattern for dashboard view.\"\"\" urlpatterns = [ path('',",
"import views \"\"\"Url pattern for dashboard view.\"\"\" urlpatterns = [ path('', views.index, name=\"home\"),"
] |
[
"import utcnow # TODO http://192.168.1.30:7999/3/config/set?emulate_motion=on/off # TODO implement ffmpeg_output_movies control: curl http://192.168.1.30:7999/3/config/set?ffmpeg_output_movies=off _LOGGER",
"device_info[CONF_CONTROL_PORT] cam_id = device_info[CONF_CONTROL_CAM_ID] self._control_url = ( f\"{url_p.scheme}://{url_p.netloc.split(':')[0]}\" f\":{control_port}/{cam_id}/detection/\" ) self._online = True",
"async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop): response = await websession.get(url) raw = await response.read() if",
"camera image\", self.entity_id) self._online = False except aiohttp.ClientError as err: if self._online: _LOGGER.error(",
"response = await websession.get(url) raw = await response.read() if not raw: _LOGGER.error(f\"No control",
"command == 'status' else RG_CONTROL try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop): response",
"self._online = False return self._last_image async def async_get_camera_motion_status(self, command='status'): \"\"\"Asks for the motion",
"return except aiohttp.ClientError as err: _LOGGER.error(f\"Error in motion detection control at {url}: \"",
"Detection status (\\w+)\\s?\\n') RG_CONTROL = re.compile(' Detection (\\w+)\\s?\\n') # pylint: disable=unused-argument async def",
"return self._last_image await self.async_get_camera_motion_status(command='status') except asyncio.TimeoutError: if self._online: _LOGGER.warning(\"%s: Timeout getting camera image\",",
"params, query, fragment) url_p = urlparse(self._snapshot_url) control_port = device_info[CONF_CONTROL_PORT] cam_id = device_info[CONF_CONTROL_CAM_ID] self._control_url",
"url.\"\"\" def __init__(self, hass, device_info): \"\"\"Initialize a generic camera.\"\"\" super().__init__() self.hass = hass",
"found\") self._motion_detection_active = False elif status_found[0] in ['ACTIVE', 'resumed']: self._motion_detection_active = True else:",
"self._control_url = ( f\"{url_p.scheme}://{url_p.netloc.split(':')[0]}\" f\":{control_port}/{cam_id}/detection/\" ) self._online = True self._last_image = None self._last_status",
"about this platform, please refer to the documentation at https://home-assistant.io/components/camera.motioneye/ \"\"\" import asyncio",
"hass, device_info): \"\"\"Initialize a generic camera.\"\"\" super().__init__() self.hass = hass self._name = device_info.get(CONF_NAME)",
"self._last_image = None self._last_status = None self._motion_detection_active = False self.is_streaming = False #",
"\"\"\"Return the camera brand.\"\"\" return \"MotionEye\" @property def motion_detection_enabled(self): \"\"\"Return the camera motion",
"self._online = True self._last_image = None self._last_status = None self._motion_detection_active = False self.is_streaming",
"motion_detection_enabled(self): \"\"\"Return the camera motion detection status.\"\"\" return self._motion_detection_active @property def model(self): \"\"\"Return",
"the camera.\"\"\" if self._control_url is None: self._motion_detection_active = False return url = self._control_url",
"the camera model.\"\"\" return \"MotionEye Snapshot Camera\" async def async_enable_motion_detection(self): \"\"\"Enable motion detection",
"model(self): \"\"\"Return the camera model.\"\"\" return \"MotionEye Snapshot Camera\" async def async_enable_motion_detection(self): \"\"\"Enable",
"= DEFAULT_CONTENT_TYPE self._snapshot_url = device_info[CONF_SNAPSHOT_URL] self._control_url = None self._with_motion_detection = device_info[CONF_WITH_MOTION_CONTROL] if self._with_motion_detection:",
"= 'camera_id' CONF_SNAPSHOT_URL = 'snapshot_url' CONF_WITH_MOTION_CONTROL = 'with_motion_control' DEFAULT_NAME = 'MotionEye Camera' PLATFORM_SCHEMA",
"\"\"\"Return a still image response from the camera.\"\"\" try: websession = async_get_clientsession(self.hass) with",
"RG_CONTROL try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop): response = await websession.get(url) raw",
"status pull as well! (scan_interval = 120) await self.async_get_camera_motion_status(command='status') @property def is_recording(self): \"\"\"Return",
"about to be added.\"\"\" # TODO add some periodic status pull as well!",
"= device_info[CONF_WITH_MOTION_CONTROL] if self._with_motion_detection: # ParseResult(scheme, netloc, url, params, query, fragment) url_p =",
"RG_CONTROL = re.compile(' Detection (\\w+)\\s?\\n') # pylint: disable=unused-argument async def async_setup_platform(hass, config, async_add_entities,",
"the camera motion detection status.\"\"\" return self._motion_detection_active @property def model(self): \"\"\"Return the camera",
"response from the camera.\"\"\" try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop): response =",
"as err: if self._online: _LOGGER.error( \"%s: ClientError getting new camera image: %s\", self.name,",
"= False self.is_streaming = False # self._motion_detected = False async def async_added_to_hass(self): \"\"\"Handle",
"vol from homeassistant.const import CONF_NAME from homeassistant.components.camera import ( PLATFORM_SCHEMA, DEFAULT_CONTENT_TYPE, Camera) from",
"the motion detection status of the camera.\"\"\" if self._control_url is None: self._motion_detection_active =",
"from {url}: \" f\"{raw}, no pattern found\") self._motion_detection_active = False elif status_found[0] in",
"for MotionEye Cameras. For more details about this platform, please refer to the",
"Camera) from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers import config_validation as cv from homeassistant.util.dt",
"= False return url = self._control_url + command reg_expr = RG_STATUS if command",
"_LOGGER.error(f\"Error in motion detection control at {url}: \" f\"{str(err)}\") @property def name(self): \"\"\"Return",
"_LOGGER = logging.getLogger(__name__) CONF_CONTROL_PORT = 'control_port' CONF_CONTROL_CAM_ID = 'camera_id' CONF_SNAPSHOT_URL = 'snapshot_url' CONF_WITH_MOTION_CONTROL",
"self.is_streaming = True await self.async_get_camera_motion_status(command='start') self.async_schedule_update_ha_state() async def async_disable_motion_detection(self): \"\"\"Disable motion detection in",
"async_add_entities, discovery_info=None): \"\"\"Set up a generic IP Camera.\"\"\" async_add_entities([MotionEyeCamera(hass, config)]) class MotionEyeCamera(Camera): \"\"\"A",
"model.\"\"\" return \"MotionEye Snapshot Camera\" async def async_enable_motion_detection(self): \"\"\"Enable motion detection in the",
"url, params, query, fragment) url_p = urlparse(self._snapshot_url) control_port = device_info[CONF_CONTROL_PORT] cam_id = device_info[CONF_CONTROL_CAM_ID]",
"= device_info[CONF_SNAPSHOT_URL] self._control_url = None self._with_motion_detection = device_info[CONF_WITH_MOTION_CONTROL] if self._with_motion_detection: # ParseResult(scheme, netloc,",
"'MotionEye Camera' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_SNAPSHOT_URL): cv.url, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_WITH_MOTION_CONTROL, default=False): cv.boolean,",
"async_setup_platform(hass, config, async_add_entities, discovery_info=None): \"\"\"Set up a generic IP Camera.\"\"\" async_add_entities([MotionEyeCamera(hass, config)]) class",
"{url}: \" f\"{raw}, no pattern found\") self._motion_detection_active = False elif status_found[0] in ['ACTIVE',",
"Camera.\"\"\" async_add_entities([MotionEyeCamera(hass, config)]) class MotionEyeCamera(Camera): \"\"\"A very simple implementation of a MotionEye camera,",
"discovery_info=None): \"\"\"Set up a generic IP Camera.\"\"\" async_add_entities([MotionEyeCamera(hass, config)]) class MotionEyeCamera(Camera): \"\"\"A very",
"self._last_image await self.async_get_camera_motion_status(command='status') except asyncio.TimeoutError: if self._online: _LOGGER.warning(\"%s: Timeout getting camera image\", self.entity_id)",
"entity which are about to be added.\"\"\" # TODO add some periodic status",
"image.\"\"\" return asyncio.run_coroutine_threadsafe( self.async_camera_image(), self.hass.loop).result() async def async_camera_image(self): \"\"\"Return a still image response",
"""
Support for MotionEye Cameras.

For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.motioneye/
"""
import asyncio
import logging
from urllib.parse import urlparse
import re

import aiohttp
import async_timeout
import voluptuous as vol

from homeassistant.const import CONF_NAME
from homeassistant.components.camera import (
    PLATFORM_SCHEMA, DEFAULT_CONTENT_TYPE, Camera)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers import config_validation as cv
from homeassistant.util.dt import utcnow

# TODO http://192.168.1.30:7999/3/config/set?emulate_motion=on/off
# TODO implement ffmpeg_output_movies control: curl http://192.168.1.30:7999/3/config/set?ffmpeg_output_movies=off

_LOGGER = logging.getLogger(__name__)

CONF_CONTROL_PORT = 'control_port'
CONF_CONTROL_CAM_ID = 'camera_id'
CONF_SNAPSHOT_URL = 'snapshot_url'
CONF_WITH_MOTION_CONTROL = 'with_motion_control'

DEFAULT_NAME = 'MotionEye Camera'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_SNAPSHOT_URL): cv.url,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_WITH_MOTION_CONTROL, default=False): cv.boolean,
    vol.Optional(CONF_CONTROL_PORT, default=7999): cv.positive_int,
    vol.Optional(CONF_CONTROL_CAM_ID, default=1): cv.positive_int
})

# Raw strings: the original non-raw patterns relied on Python passing
# '\w' / '\s' through unchanged, which raises DeprecationWarning (and
# SyntaxWarning on Python >= 3.12). The patterns themselves are unchanged.
RG_STATUS = re.compile(r' Detection status (\w+)\s?\n')
RG_CONTROL = re.compile(r' Detection (\w+)\s?\n')


# pylint: disable=unused-argument
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up a generic IP Camera."""
    async_add_entities([MotionEyeCamera(hass, config)])


class MotionEyeCamera(Camera):
    """A very simple implementation of a MotionEye camera, using the snapshot url."""

    def __init__(self, hass, device_info):
        """Initialize a generic camera.

        ``device_info`` is the validated PLATFORM_SCHEMA config dict.  When
        motion control is enabled, the control URL is derived from the
        snapshot URL's host plus the configured control port and camera id.
        """
        super().__init__()
        self.hass = hass
        self._name = device_info.get(CONF_NAME)
        self.content_type = DEFAULT_CONTENT_TYPE
        self._snapshot_url = device_info[CONF_SNAPSHOT_URL]
        self._control_url = None
        self._with_motion_detection = device_info[CONF_WITH_MOTION_CONTROL]
        if self._with_motion_detection:
            # ParseResult(scheme, netloc, url, params, query, fragment)
            url_p = urlparse(self._snapshot_url)
            control_port = device_info[CONF_CONTROL_PORT]
            cam_id = device_info[CONF_CONTROL_CAM_ID]
            # Strip any explicit port from the snapshot netloc and replace
            # it with the motion-daemon webcontrol port.
            self._control_url = (
                f"{url_p.scheme}://{url_p.netloc.split(':')[0]}"
                f":{control_port}/{cam_id}/detection/"
            )
        self._online = True
        self._last_image = None          # last snapshot bytes (cache)
        self._last_status = None         # utcnow() of last status poll
        self._motion_detection_active = False
        self.is_streaming = False
        # self._motion_detected = False

    async def async_added_to_hass(self):
        """Handle all entity which are about to be added."""
        # TODO add some periodic status pull as well! (scan_interval = 120)
        await self.async_get_camera_motion_status(command='status')

    @property
    def is_recording(self):
        """Return true if the device is recording."""
        # return self._motion_detected
        return self._motion_detection_active

    @property
    def brand(self):
        """Return the camera brand."""
        return "MotionEye"

    @property
    def motion_detection_enabled(self):
        """Return the camera motion detection status."""
        return self._motion_detection_active

    @property
    def model(self):
        """Return the camera model."""
        return "MotionEye Snapshot Camera"

    async def async_enable_motion_detection(self):
        """Enable motion detection in the camera."""
        self.is_streaming = True
        await self.async_get_camera_motion_status(command='start')
        self.async_schedule_update_ha_state()

    async def async_disable_motion_detection(self):
        """Disable motion detection in camera."""
        self.is_streaming = False
        await self.async_get_camera_motion_status(command='pause')
        self.async_schedule_update_ha_state()

    def camera_image(self):
        """Return bytes of camera image."""
        return asyncio.run_coroutine_threadsafe(
            self.async_camera_image(), self.hass.loop).result()

    async def async_camera_image(self):
        """Return a still image response from the camera.

        On success the snapshot is cached in ``self._last_image``; on
        timeout or client error the cached image is returned instead, and
        the online/offline transition is logged exactly once.
        """
        try:
            websession = async_get_clientsession(self.hass)
            with async_timeout.timeout(10, loop=self.hass.loop):
                response = await websession.get(self._snapshot_url)
            self._last_image = await response.read()
            if not self._online:
                _LOGGER.warning("%s: Recovered camera image", self.entity_id)
                self._online = True
            # Skip the status poll when control is disabled or the last
            # poll is fresh enough (< 60 s old).
            if (self._control_url is None
                    or (self._last_status is not None
                        and (utcnow() - self._last_status).total_seconds() < 60)):
                return self._last_image
            await self.async_get_camera_motion_status(command='status')
        except asyncio.TimeoutError:
            if self._online:
                _LOGGER.warning("%s: Timeout getting camera image",
                                self.entity_id)
                self._online = False
        except aiohttp.ClientError as err:
            if self._online:
                _LOGGER.error(
                    "%s: ClientError getting new camera image: %s",
                    self.name, err
                )
                self._online = False
        return self._last_image

    async def async_get_camera_motion_status(self, command='status'):
        """Asks for the motion detection status of the camera.

        ``command`` is appended to the control URL: 'status' reads the
        current state, 'start' / 'pause' toggle detection.  The response
        is matched against RG_STATUS or RG_CONTROL respectively.
        """
        if self._control_url is None:
            # Motion control not configured for this camera.
            self._motion_detection_active = False
            return
        url = self._control_url + command
        reg_expr = RG_STATUS if command == 'status' else RG_CONTROL
        try:
            websession = async_get_clientsession(self.hass)
            with async_timeout.timeout(10, loop=self.hass.loop):
                response = await websession.get(url)
            raw = await response.read()
            if not raw:
                # Empty body: report it once and bail out, instead of
                # falling through and also logging a misleading
                # "no pattern found" error for the same failure.
                _LOGGER.error("No control response in %s", url)
                self._motion_detection_active = False
                return
            status_found = reg_expr.findall(raw.decode())
            if not status_found:
                _LOGGER.error("Bad control response from %s: "
                              "%s, no pattern found", url, raw)
                self._motion_detection_active = False
            elif status_found[0] in ('ACTIVE', 'resumed'):
                self._motion_detection_active = True
            else:
                self._motion_detection_active = False
            self._last_status = utcnow()
        except asyncio.TimeoutError:
            _LOGGER.warning("Timeout in motion detection control at %s", url)
            # return
        except aiohttp.ClientError as err:
            _LOGGER.error("Error in motion detection control at %s: %s",
                          url, err)

    @property
    def name(self):
        """Return the name of this camera."""
        # NOTE(review): the tail of this property lies outside the visible
        # chunk; restored as the conventional HA pattern (returns the
        # configured name set in __init__) — confirm against the original.
        return self._name
"self.async_get_camera_motion_status(command='pause') self.async_schedule_update_ha_state() def camera_image(self): \"\"\"Return bytes of camera image.\"\"\" return asyncio.run_coroutine_threadsafe( self.async_camera_image(), self.hass.loop).result()",
"config_validation as cv from homeassistant.util.dt import utcnow # TODO http://192.168.1.30:7999/3/config/set?emulate_motion=on/off # TODO implement",
"err: _LOGGER.error(f\"Error in motion detection control at {url}: \" f\"{str(err)}\") @property def name(self):",
"async_timeout.timeout(10, loop=self.hass.loop): response = await websession.get(url) raw = await response.read() if not raw:",
"'with_motion_control' DEFAULT_NAME = 'MotionEye Camera' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_SNAPSHOT_URL): cv.url, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,",
"aiohttp.ClientError as err: _LOGGER.error(f\"Error in motion detection control at {url}: \" f\"{str(err)}\") @property",
"as cv from homeassistant.util.dt import utcnow # TODO http://192.168.1.30:7999/3/config/set?emulate_motion=on/off # TODO implement ffmpeg_output_movies",
"Camera' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_SNAPSHOT_URL): cv.url, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_WITH_MOTION_CONTROL, default=False): cv.boolean, vol.Optional(CONF_CONTROL_PORT,",
"up a generic IP Camera.\"\"\" async_add_entities([MotionEyeCamera(hass, config)]) class MotionEyeCamera(Camera): \"\"\"A very simple implementation",
"= 'control_port' CONF_CONTROL_CAM_ID = 'camera_id' CONF_SNAPSHOT_URL = 'snapshot_url' CONF_WITH_MOTION_CONTROL = 'with_motion_control' DEFAULT_NAME =",
"not raw: _LOGGER.error(f\"No control response in {url}\") status_found = reg_expr.findall(raw.decode()) if not status_found:",
"def brand(self): \"\"\"Return the camera brand.\"\"\" return \"MotionEye\" @property def motion_detection_enabled(self): \"\"\"Return the",
"self.name, err ) self._online = False return self._last_image async def async_get_camera_motion_status(self, command='status'): \"\"\"Asks",
"self._control_url + command reg_expr = RG_STATUS if command == 'status' else RG_CONTROL try:",
"raw = await response.read() if not raw: _LOGGER.error(f\"No control response in {url}\") status_found",
"device_info[CONF_SNAPSHOT_URL] self._control_url = None self._with_motion_detection = device_info[CONF_WITH_MOTION_CONTROL] if self._with_motion_detection: # ParseResult(scheme, netloc, url,",
"RG_STATUS if command == 'status' else RG_CONTROL try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10,",
"async_camera_image(self): \"\"\"Return a still image response from the camera.\"\"\" try: websession = async_get_clientsession(self.hass)",
"Detection (\\w+)\\s?\\n') # pylint: disable=unused-argument async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): \"\"\"Set up",
"True self._last_image = None self._last_status = None self._motion_detection_active = False self.is_streaming = False",
"= 'with_motion_control' DEFAULT_NAME = 'MotionEye Camera' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_SNAPSHOT_URL): cv.url, vol.Optional(CONF_NAME, default=DEFAULT_NAME):",
"self._last_image = await response.read() if not self._online: _LOGGER.warning(\"%s: Recovered camera image\", self.entity_id) self._online",
"no pattern found\") self._motion_detection_active = False elif status_found[0] in ['ACTIVE', 'resumed']: self._motion_detection_active =",
"PLATFORM_SCHEMA.extend({ vol.Required(CONF_SNAPSHOT_URL): cv.url, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_WITH_MOTION_CONTROL, default=False): cv.boolean, vol.Optional(CONF_CONTROL_PORT, default=7999): cv.positive_int, vol.Optional(CONF_CONTROL_CAM_ID,",
"CONF_WITH_MOTION_CONTROL = 'with_motion_control' DEFAULT_NAME = 'MotionEye Camera' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_SNAPSHOT_URL): cv.url, vol.Optional(CONF_NAME,",
"self._online: _LOGGER.warning(\"%s: Recovered camera image\", self.entity_id) self._online = True if (self._control_url is None",
"at {url}\") # return except aiohttp.ClientError as err: _LOGGER.error(f\"Error in motion detection control",
"detection control at {url}\") # return except aiohttp.ClientError as err: _LOGGER.error(f\"Error in motion",
"a generic camera.\"\"\" super().__init__() self.hass = hass self._name = device_info.get(CONF_NAME) self.content_type = DEFAULT_CONTENT_TYPE",
"self.async_get_camera_motion_status(command='start') self.async_schedule_update_ha_state() async def async_disable_motion_detection(self): \"\"\"Disable motion detection in camera.\"\"\" self.is_streaming = False",
"hass self._name = device_info.get(CONF_NAME) self.content_type = DEFAULT_CONTENT_TYPE self._snapshot_url = device_info[CONF_SNAPSHOT_URL] self._control_url = None",
"return \"MotionEye Snapshot Camera\" async def async_enable_motion_detection(self): \"\"\"Enable motion detection in the camera.\"\"\"",
"self._last_image async def async_get_camera_motion_status(self, command='status'): \"\"\"Asks for the motion detection status of the",
"urllib.parse import urlparse import re import aiohttp import async_timeout import voluptuous as vol",
"def async_get_camera_motion_status(self, command='status'): \"\"\"Asks for the motion detection status of the camera.\"\"\" if",
"if self._control_url is None: self._motion_detection_active = False return url = self._control_url + command",
"_LOGGER.warning(f\"Timeout in motion detection control at {url}\") # return except aiohttp.ClientError as err:",
"\" f\"{str(err)}\") @property def name(self): \"\"\"Return the name of this device.\"\"\" return self._name",
"MotionEye Cameras. For more details about this platform, please refer to the documentation",
"import logging from urllib.parse import urlparse import re import aiohttp import async_timeout import",
"is None or (self._last_status is not None and (utcnow() - self._last_status).total_seconds() < 60)):",
"async_add_entities([MotionEyeCamera(hass, config)]) class MotionEyeCamera(Camera): \"\"\"A very simple implementation of a MotionEye camera, using",
"Cameras. For more details about this platform, please refer to the documentation at",
"which are about to be added.\"\"\" # TODO add some periodic status pull",
"def async_added_to_hass(self): \"\"\"Handle all entity which are about to be added.\"\"\" # TODO",
"status_found: _LOGGER.error(f\"Bad control response from {url}: \" f\"{raw}, no pattern found\") self._motion_detection_active =",
"camera.\"\"\" if self._control_url is None: self._motion_detection_active = False return url = self._control_url +",
"None and (utcnow() - self._last_status).total_seconds() < 60)): return self._last_image await self.async_get_camera_motion_status(command='status') except asyncio.TimeoutError:",
"status_found = reg_expr.findall(raw.decode()) if not status_found: _LOGGER.error(f\"Bad control response from {url}: \" f\"{raw},",
"(utcnow() - self._last_status).total_seconds() < 60)): return self._last_image await self.async_get_camera_motion_status(command='status') except asyncio.TimeoutError: if self._online:",
"if (self._control_url is None or (self._last_status is not None and (utcnow() - self._last_status).total_seconds()",
"async_enable_motion_detection(self): \"\"\"Enable motion detection in the camera.\"\"\" self.is_streaming = True await self.async_get_camera_motion_status(command='start') self.async_schedule_update_ha_state()",
"well! (scan_interval = 120) await self.async_get_camera_motion_status(command='status') @property def is_recording(self): \"\"\"Return true if the",
"utcnow # TODO http://192.168.1.30:7999/3/config/set?emulate_motion=on/off # TODO implement ffmpeg_output_movies control: curl http://192.168.1.30:7999/3/config/set?ffmpeg_output_movies=off _LOGGER =",
"= async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop): response = await websession.get(url) raw = await response.read()",
"\"MotionEye Snapshot Camera\" async def async_enable_motion_detection(self): \"\"\"Enable motion detection in the camera.\"\"\" self.is_streaming",
"<gh_stars>10-100 \"\"\" Support for MotionEye Cameras. For more details about this platform, please",
"# return self._motion_detected return self._motion_detection_active @property def brand(self): \"\"\"Return the camera brand.\"\"\" return",
"}) RG_STATUS = re.compile(' Detection status (\\w+)\\s?\\n') RG_CONTROL = re.compile(' Detection (\\w+)\\s?\\n') #",
"a generic IP Camera.\"\"\" async_add_entities([MotionEyeCamera(hass, config)]) class MotionEyeCamera(Camera): \"\"\"A very simple implementation of",
"camera.\"\"\" try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop): response = await websession.get(self._snapshot_url) self._last_image",
"( PLATFORM_SCHEMA, DEFAULT_CONTENT_TYPE, Camera) from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers import config_validation as",
"return self._motion_detected return self._motion_detection_active @property def brand(self): \"\"\"Return the camera brand.\"\"\" return \"MotionEye\"",
"= False elif status_found[0] in ['ACTIVE', 'resumed']: self._motion_detection_active = True else: self._motion_detection_active =",
"return self._motion_detection_active @property def model(self): \"\"\"Return the camera model.\"\"\" return \"MotionEye Snapshot Camera\"",
"generic IP Camera.\"\"\" async_add_entities([MotionEyeCamera(hass, config)]) class MotionEyeCamera(Camera): \"\"\"A very simple implementation of a",
"self._with_motion_detection = device_info[CONF_WITH_MOTION_CONTROL] if self._with_motion_detection: # ParseResult(scheme, netloc, url, params, query, fragment) url_p",
"from homeassistant.const import CONF_NAME from homeassistant.components.camera import ( PLATFORM_SCHEMA, DEFAULT_CONTENT_TYPE, Camera) from homeassistant.helpers.aiohttp_client",
"http://192.168.1.30:7999/3/config/set?emulate_motion=on/off # TODO implement ffmpeg_output_movies control: curl http://192.168.1.30:7999/3/config/set?ffmpeg_output_movies=off _LOGGER = logging.getLogger(__name__) CONF_CONTROL_PORT =",
"< 60)): return self._last_image await self.async_get_camera_motion_status(command='status') except asyncio.TimeoutError: if self._online: _LOGGER.warning(\"%s: Timeout getting",
"camera.\"\"\" self.is_streaming = True await self.async_get_camera_motion_status(command='start') self.async_schedule_update_ha_state() async def async_disable_motion_detection(self): \"\"\"Disable motion detection",
"\"\"\"Disable motion detection in camera.\"\"\" self.is_streaming = False await self.async_get_camera_motion_status(command='pause') self.async_schedule_update_ha_state() def camera_image(self):",
"self._name = device_info.get(CONF_NAME) self.content_type = DEFAULT_CONTENT_TYPE self._snapshot_url = device_info[CONF_SNAPSHOT_URL] self._control_url = None self._with_motion_detection",
"= hass self._name = device_info.get(CONF_NAME) self.content_type = DEFAULT_CONTENT_TYPE self._snapshot_url = device_info[CONF_SNAPSHOT_URL] self._control_url =",
"for the motion detection status of the camera.\"\"\" if self._control_url is None: self._motion_detection_active",
"the camera.\"\"\" self.is_streaming = True await self.async_get_camera_motion_status(command='start') self.async_schedule_update_ha_state() async def async_disable_motion_detection(self): \"\"\"Disable motion",
"= True self._last_image = None self._last_status = None self._motion_detection_active = False self.is_streaming =",
"aiohttp import async_timeout import voluptuous as vol from homeassistant.const import CONF_NAME from homeassistant.components.camera",
"logging from urllib.parse import urlparse import re import aiohttp import async_timeout import voluptuous",
"control_port = device_info[CONF_CONTROL_PORT] cam_id = device_info[CONF_CONTROL_CAM_ID] self._control_url = ( f\"{url_p.scheme}://{url_p.netloc.split(':')[0]}\" f\":{control_port}/{cam_id}/detection/\" ) self._online",
"f\"{raw}, no pattern found\") self._motion_detection_active = False elif status_found[0] in ['ACTIVE', 'resumed']: self._motion_detection_active",
"motion detection status.\"\"\" return self._motion_detection_active @property def model(self): \"\"\"Return the camera model.\"\"\" return",
"from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers import config_validation as cv from homeassistant.util.dt import",
"websession.get(self._snapshot_url) self._last_image = await response.read() if not self._online: _LOGGER.warning(\"%s: Recovered camera image\", self.entity_id)",
"the documentation at https://home-assistant.io/components/camera.motioneye/ \"\"\" import asyncio import logging from urllib.parse import urlparse",
"is recording.\"\"\" # return self._motion_detected return self._motion_detection_active @property def brand(self): \"\"\"Return the camera",
"( f\"{url_p.scheme}://{url_p.netloc.split(':')[0]}\" f\":{control_port}/{cam_id}/detection/\" ) self._online = True self._last_image = None self._last_status = None",
"some periodic status pull as well! (scan_interval = 120) await self.async_get_camera_motion_status(command='status') @property def",
"new camera image: %s\", self.name, err ) self._online = False return self._last_image async",
"await self.async_get_camera_motion_status(command='status') @property def is_recording(self): \"\"\"Return true if the device is recording.\"\"\" #",
"if self._with_motion_detection: # ParseResult(scheme, netloc, url, params, query, fragment) url_p = urlparse(self._snapshot_url) control_port",
"\"\"\"Return the camera model.\"\"\" return \"MotionEye Snapshot Camera\" async def async_enable_motion_detection(self): \"\"\"Enable motion",
"def async_camera_image(self): \"\"\"Return a still image response from the camera.\"\"\" try: websession =",
"False except aiohttp.ClientError as err: if self._online: _LOGGER.error( \"%s: ClientError getting new camera",
"'camera_id' CONF_SNAPSHOT_URL = 'snapshot_url' CONF_WITH_MOTION_CONTROL = 'with_motion_control' DEFAULT_NAME = 'MotionEye Camera' PLATFORM_SCHEMA =",
"(self._control_url is None or (self._last_status is not None and (utcnow() - self._last_status).total_seconds() <",
"response = await websession.get(self._snapshot_url) self._last_image = await response.read() if not self._online: _LOGGER.warning(\"%s: Recovered",
"cv.url, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_WITH_MOTION_CONTROL, default=False): cv.boolean, vol.Optional(CONF_CONTROL_PORT, default=7999): cv.positive_int, vol.Optional(CONF_CONTROL_CAM_ID, default=1): cv.positive_int",
"detection in camera.\"\"\" self.is_streaming = False await self.async_get_camera_motion_status(command='pause') self.async_schedule_update_ha_state() def camera_image(self): \"\"\"Return bytes",
"of camera image.\"\"\" return asyncio.run_coroutine_threadsafe( self.async_camera_image(), self.hass.loop).result() async def async_camera_image(self): \"\"\"Return a still",
"in motion detection control at {url}\") # return except aiohttp.ClientError as err: _LOGGER.error(f\"Error",
"urlparse import re import aiohttp import async_timeout import voluptuous as vol from homeassistant.const",
"_LOGGER.warning(\"%s: Recovered camera image\", self.entity_id) self._online = True if (self._control_url is None or",
"# pylint: disable=unused-argument async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): \"\"\"Set up a generic",
"as well! (scan_interval = 120) await self.async_get_camera_motion_status(command='status') @property def is_recording(self): \"\"\"Return true if",
"self._snapshot_url = device_info[CONF_SNAPSHOT_URL] self._control_url = None self._with_motion_detection = device_info[CONF_WITH_MOTION_CONTROL] if self._with_motion_detection: # ParseResult(scheme,",
"self.async_get_camera_motion_status(command='status') except asyncio.TimeoutError: if self._online: _LOGGER.warning(\"%s: Timeout getting camera image\", self.entity_id) self._online =",
"self._motion_detection_active = False return url = self._control_url + command reg_expr = RG_STATUS if",
"detection control at {url}: \" f\"{str(err)}\") @property def name(self): \"\"\"Return the name of",
"pylint: disable=unused-argument async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): \"\"\"Set up a generic IP",
"netloc, url, params, query, fragment) url_p = urlparse(self._snapshot_url) control_port = device_info[CONF_CONTROL_PORT] cam_id =",
"aiohttp.ClientError as err: if self._online: _LOGGER.error( \"%s: ClientError getting new camera image: %s\",",
"brand.\"\"\" return \"MotionEye\" @property def motion_detection_enabled(self): \"\"\"Return the camera motion detection status.\"\"\" return",
"self._motion_detection_active @property def brand(self): \"\"\"Return the camera brand.\"\"\" return \"MotionEye\" @property def motion_detection_enabled(self):",
"TODO http://192.168.1.30:7999/3/config/set?emulate_motion=on/off # TODO implement ffmpeg_output_movies control: curl http://192.168.1.30:7999/3/config/set?ffmpeg_output_movies=off _LOGGER = logging.getLogger(__name__) CONF_CONTROL_PORT",
"self.async_get_camera_motion_status(command='status') @property def is_recording(self): \"\"\"Return true if the device is recording.\"\"\" # return",
"= None self._motion_detection_active = False self.is_streaming = False # self._motion_detected = False async",
"bytes of camera image.\"\"\" return asyncio.run_coroutine_threadsafe( self.async_camera_image(), self.hass.loop).result() async def async_camera_image(self): \"\"\"Return a",
"['ACTIVE', 'resumed']: self._motion_detection_active = True else: self._motion_detection_active = False self._last_status = utcnow() except",
"Support for MotionEye Cameras. For more details about this platform, please refer to",
"if self._online: _LOGGER.error( \"%s: ClientError getting new camera image: %s\", self.name, err )",
"status (\\w+)\\s?\\n') RG_CONTROL = re.compile(' Detection (\\w+)\\s?\\n') # pylint: disable=unused-argument async def async_setup_platform(hass,",
"detection in the camera.\"\"\" self.is_streaming = True await self.async_get_camera_motion_status(command='start') self.async_schedule_update_ha_state() async def async_disable_motion_detection(self):",
"add some periodic status pull as well! (scan_interval = 120) await self.async_get_camera_motion_status(command='status') @property",
"CONF_NAME from homeassistant.components.camera import ( PLATFORM_SCHEMA, DEFAULT_CONTENT_TYPE, Camera) from homeassistant.helpers.aiohttp_client import async_get_clientsession from",
"if not self._online: _LOGGER.warning(\"%s: Recovered camera image\", self.entity_id) self._online = True if (self._control_url",
"image\", self.entity_id) self._online = True if (self._control_url is None or (self._last_status is not",
"{url}\") status_found = reg_expr.findall(raw.decode()) if not status_found: _LOGGER.error(f\"Bad control response from {url}: \"",
"= None self._last_status = None self._motion_detection_active = False self.is_streaming = False # self._motion_detected",
"CONF_SNAPSHOT_URL = 'snapshot_url' CONF_WITH_MOTION_CONTROL = 'with_motion_control' DEFAULT_NAME = 'MotionEye Camera' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({",
"camera model.\"\"\" return \"MotionEye Snapshot Camera\" async def async_enable_motion_detection(self): \"\"\"Enable motion detection in",
"the camera.\"\"\" try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop): response = await websession.get(self._snapshot_url)",
"(self._last_status is not None and (utcnow() - self._last_status).total_seconds() < 60)): return self._last_image await",
"self._motion_detection_active @property def model(self): \"\"\"Return the camera model.\"\"\" return \"MotionEye Snapshot Camera\" async",
"the camera brand.\"\"\" return \"MotionEye\" @property def motion_detection_enabled(self): \"\"\"Return the camera motion detection",
"loop=self.hass.loop): response = await websession.get(self._snapshot_url) self._last_image = await response.read() if not self._online: _LOGGER.warning(\"%s:",
"%s\", self.name, err ) self._online = False return self._last_image async def async_get_camera_motion_status(self, command='status'):",
"details about this platform, please refer to the documentation at https://home-assistant.io/components/camera.motioneye/ \"\"\" import",
"except asyncio.TimeoutError: _LOGGER.warning(f\"Timeout in motion detection control at {url}\") # return except aiohttp.ClientError",
"'control_port' CONF_CONTROL_CAM_ID = 'camera_id' CONF_SNAPSHOT_URL = 'snapshot_url' CONF_WITH_MOTION_CONTROL = 'with_motion_control' DEFAULT_NAME = 'MotionEye",
"ParseResult(scheme, netloc, url, params, query, fragment) url_p = urlparse(self._snapshot_url) control_port = device_info[CONF_CONTROL_PORT] cam_id",
"_LOGGER.error(f\"No control response in {url}\") status_found = reg_expr.findall(raw.decode()) if not status_found: _LOGGER.error(f\"Bad control",
"using the snapshot url.\"\"\" def __init__(self, hass, device_info): \"\"\"Initialize a generic camera.\"\"\" super().__init__()",
"= False async def async_added_to_hass(self): \"\"\"Handle all entity which are about to be",
"still image response from the camera.\"\"\" try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop):",
"else RG_CONTROL try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop): response = await websession.get(url)",
"websession.get(url) raw = await response.read() if not raw: _LOGGER.error(f\"No control response in {url}\")",
"'status' else RG_CONTROL try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop): response = await",
"a MotionEye camera, using the snapshot url.\"\"\" def __init__(self, hass, device_info): \"\"\"Initialize a",
"motion detection control at {url}\") # return except aiohttp.ClientError as err: _LOGGER.error(f\"Error in",
"None self._last_status = None self._motion_detection_active = False self.is_streaming = False # self._motion_detected =",
"Timeout getting camera image\", self.entity_id) self._online = False except aiohttp.ClientError as err: if",
"device is recording.\"\"\" # return self._motion_detected return self._motion_detection_active @property def brand(self): \"\"\"Return the",
"_LOGGER.error(f\"Bad control response from {url}: \" f\"{raw}, no pattern found\") self._motion_detection_active = False",
"return self._motion_detection_active @property def brand(self): \"\"\"Return the camera brand.\"\"\" return \"MotionEye\" @property def",
"\"\"\"Enable motion detection in the camera.\"\"\" self.is_streaming = True await self.async_get_camera_motion_status(command='start') self.async_schedule_update_ha_state() async",
"{url}: \" f\"{str(err)}\") @property def name(self): \"\"\"Return the name of this device.\"\"\" return",
"DEFAULT_CONTENT_TYPE, Camera) from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers import config_validation as cv from",
"(\\w+)\\s?\\n') RG_CONTROL = re.compile(' Detection (\\w+)\\s?\\n') # pylint: disable=unused-argument async def async_setup_platform(hass, config,",
"self._online: _LOGGER.warning(\"%s: Timeout getting camera image\", self.entity_id) self._online = False except aiohttp.ClientError as",
"to the documentation at https://home-assistant.io/components/camera.motioneye/ \"\"\" import asyncio import logging from urllib.parse import",
"import aiohttp import async_timeout import voluptuous as vol from homeassistant.const import CONF_NAME from",
"self._online = True if (self._control_url is None or (self._last_status is not None and",
"from urllib.parse import urlparse import re import aiohttp import async_timeout import voluptuous as",
"brand(self): \"\"\"Return the camera brand.\"\"\" return \"MotionEye\" @property def motion_detection_enabled(self): \"\"\"Return the camera",
"cv.positive_int }) RG_STATUS = re.compile(' Detection status (\\w+)\\s?\\n') RG_CONTROL = re.compile(' Detection (\\w+)\\s?\\n')",
"f\":{control_port}/{cam_id}/detection/\" ) self._online = True self._last_image = None self._last_status = None self._motion_detection_active =",
"MotionEyeCamera(Camera): \"\"\"A very simple implementation of a MotionEye camera, using the snapshot url.\"\"\"",
"\"\"\"Return true if the device is recording.\"\"\" # return self._motion_detected return self._motion_detection_active @property",
"config)]) class MotionEyeCamera(Camera): \"\"\"A very simple implementation of a MotionEye camera, using the",
"async def async_enable_motion_detection(self): \"\"\"Enable motion detection in the camera.\"\"\" self.is_streaming = True await",
"in ['ACTIVE', 'resumed']: self._motion_detection_active = True else: self._motion_detection_active = False self._last_status = utcnow()",
"self._motion_detection_active = True else: self._motion_detection_active = False self._last_status = utcnow() except asyncio.TimeoutError: _LOGGER.warning(f\"Timeout",
"image response from the camera.\"\"\" try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop): response",
"default=DEFAULT_NAME): cv.string, vol.Optional(CONF_WITH_MOTION_CONTROL, default=False): cv.boolean, vol.Optional(CONF_CONTROL_PORT, default=7999): cv.positive_int, vol.Optional(CONF_CONTROL_CAM_ID, default=1): cv.positive_int }) RG_STATUS",
"simple implementation of a MotionEye camera, using the snapshot url.\"\"\" def __init__(self, hass,",
"= False # self._motion_detected = False async def async_added_to_hass(self): \"\"\"Handle all entity which",
"if command == 'status' else RG_CONTROL try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop):",
"try: websession = async_get_clientsession(self.hass) with async_timeout.timeout(10, loop=self.hass.loop): response = await websession.get(self._snapshot_url) self._last_image =",
"is_recording(self): \"\"\"Return true if the device is recording.\"\"\" # return self._motion_detected return self._motion_detection_active",
"refer to the documentation at https://home-assistant.io/components/camera.motioneye/ \"\"\" import asyncio import logging from urllib.parse",
"def async_setup_platform(hass, config, async_add_entities, discovery_info=None): \"\"\"Set up a generic IP Camera.\"\"\" async_add_entities([MotionEyeCamera(hass, config)])",
"control: curl http://192.168.1.30:7999/3/config/set?ffmpeg_output_movies=off _LOGGER = logging.getLogger(__name__) CONF_CONTROL_PORT = 'control_port' CONF_CONTROL_CAM_ID = 'camera_id' CONF_SNAPSHOT_URL",
"self.is_streaming = False await self.async_get_camera_motion_status(command='pause') self.async_schedule_update_ha_state() def camera_image(self): \"\"\"Return bytes of camera image.\"\"\"",
"asyncio.TimeoutError: if self._online: _LOGGER.warning(\"%s: Timeout getting camera image\", self.entity_id) self._online = False except",
"platform, please refer to the documentation at https://home-assistant.io/components/camera.motioneye/ \"\"\" import asyncio import logging",
"super().__init__() self.hass = hass self._name = device_info.get(CONF_NAME) self.content_type = DEFAULT_CONTENT_TYPE self._snapshot_url = device_info[CONF_SNAPSHOT_URL]",
"in the camera.\"\"\" self.is_streaming = True await self.async_get_camera_motion_status(command='start') self.async_schedule_update_ha_state() async def async_disable_motion_detection(self): \"\"\"Disable",
"= RG_STATUS if command == 'status' else RG_CONTROL try: websession = async_get_clientsession(self.hass) with",
"generic camera.\"\"\" super().__init__() self.hass = hass self._name = device_info.get(CONF_NAME) self.content_type = DEFAULT_CONTENT_TYPE self._snapshot_url",
"getting new camera image: %s\", self.name, err ) self._online = False return self._last_image",
"more details about this platform, please refer to the documentation at https://home-assistant.io/components/camera.motioneye/ \"\"\"",
"= True if (self._control_url is None or (self._last_status is not None and (utcnow()",
"image\", self.entity_id) self._online = False except aiohttp.ClientError as err: if self._online: _LOGGER.error( \"%s:",
"= device_info[CONF_CONTROL_PORT] cam_id = device_info[CONF_CONTROL_CAM_ID] self._control_url = ( f\"{url_p.scheme}://{url_p.netloc.split(':')[0]}\" f\":{control_port}/{cam_id}/detection/\" ) self._online =",
"recording.\"\"\" # return self._motion_detected return self._motion_detection_active @property def brand(self): \"\"\"Return the camera brand.\"\"\"",
"re.compile(' Detection status (\\w+)\\s?\\n') RG_CONTROL = re.compile(' Detection (\\w+)\\s?\\n') # pylint: disable=unused-argument async",
"self._online: _LOGGER.error( \"%s: ClientError getting new camera image: %s\", self.name, err ) self._online",
"pull as well! (scan_interval = 120) await self.async_get_camera_motion_status(command='status') @property def is_recording(self): \"\"\"Return true",
"device_info): \"\"\"Initialize a generic camera.\"\"\" super().__init__() self.hass = hass self._name = device_info.get(CONF_NAME) self.content_type",
"= True await self.async_get_camera_motion_status(command='start') self.async_schedule_update_ha_state() async def async_disable_motion_detection(self): \"\"\"Disable motion detection in camera.\"\"\"",
"\"\"\"Initialize a generic camera.\"\"\" super().__init__() self.hass = hass self._name = device_info.get(CONF_NAME) self.content_type =",
"err: if self._online: _LOGGER.error( \"%s: ClientError getting new camera image: %s\", self.name, err",
"camera motion detection status.\"\"\" return self._motion_detection_active @property def model(self): \"\"\"Return the camera model.\"\"\"",
"very simple implementation of a MotionEye camera, using the snapshot url.\"\"\" def __init__(self,",
"not status_found: _LOGGER.error(f\"Bad control response from {url}: \" f\"{raw}, no pattern found\") self._motion_detection_active",
"= reg_expr.findall(raw.decode()) if not status_found: _LOGGER.error(f\"Bad control response from {url}: \" f\"{raw}, no",
"self.async_schedule_update_ha_state() async def async_disable_motion_detection(self): \"\"\"Disable motion detection in camera.\"\"\" self.is_streaming = False await",
"= None self._with_motion_detection = device_info[CONF_WITH_MOTION_CONTROL] if self._with_motion_detection: # ParseResult(scheme, netloc, url, params, query,",
"re.compile(' Detection (\\w+)\\s?\\n') # pylint: disable=unused-argument async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): \"\"\"Set",
"= 'MotionEye Camera' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_SNAPSHOT_URL): cv.url, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_WITH_MOTION_CONTROL, default=False):",
"# TODO add some periodic status pull as well! (scan_interval = 120) await",
"@property def is_recording(self): \"\"\"Return true if the device is recording.\"\"\" # return self._motion_detected",
"await websession.get(self._snapshot_url) self._last_image = await response.read() if not self._online: _LOGGER.warning(\"%s: Recovered camera image\",",
"are about to be added.\"\"\" # TODO add some periodic status pull as",
"False self._last_status = utcnow() except asyncio.TimeoutError: _LOGGER.warning(f\"Timeout in motion detection control at {url}\")",
"at {url}: \" f\"{str(err)}\") @property def name(self): \"\"\"Return the name of this device.\"\"\"",
"http://192.168.1.30:7999/3/config/set?ffmpeg_output_movies=off _LOGGER = logging.getLogger(__name__) CONF_CONTROL_PORT = 'control_port' CONF_CONTROL_CAM_ID = 'camera_id' CONF_SNAPSHOT_URL = 'snapshot_url'",
"None self._motion_detection_active = False self.is_streaming = False # self._motion_detected = False async def",
"control at {url}\") # return except aiohttp.ClientError as err: _LOGGER.error(f\"Error in motion detection",
"motion detection in the camera.\"\"\" self.is_streaming = True await self.async_get_camera_motion_status(command='start') self.async_schedule_update_ha_state() async def",
"this platform, please refer to the documentation at https://home-assistant.io/components/camera.motioneye/ \"\"\" import asyncio import",
"motion detection in camera.\"\"\" self.is_streaming = False await self.async_get_camera_motion_status(command='pause') self.async_schedule_update_ha_state() def camera_image(self): \"\"\"Return",
"False await self.async_get_camera_motion_status(command='pause') self.async_schedule_update_ha_state() def camera_image(self): \"\"\"Return bytes of camera image.\"\"\" return asyncio.run_coroutine_threadsafe(",
"DEFAULT_CONTENT_TYPE self._snapshot_url = device_info[CONF_SNAPSHOT_URL] self._control_url = None self._with_motion_detection = device_info[CONF_WITH_MOTION_CONTROL] if self._with_motion_detection: #",
"reg_expr.findall(raw.decode()) if not status_found: _LOGGER.error(f\"Bad control response from {url}: \" f\"{raw}, no pattern",
"reg_expr = RG_STATUS if command == 'status' else RG_CONTROL try: websession = async_get_clientsession(self.hass)",
"= ( f\"{url_p.scheme}://{url_p.netloc.split(':')[0]}\" f\":{control_port}/{cam_id}/detection/\" ) self._online = True self._last_image = None self._last_status =",
"url = self._control_url + command reg_expr = RG_STATUS if command == 'status' else",
"_LOGGER.error( \"%s: ClientError getting new camera image: %s\", self.name, err ) self._online =",
"def camera_image(self): \"\"\"Return bytes of camera image.\"\"\" return asyncio.run_coroutine_threadsafe( self.async_camera_image(), self.hass.loop).result() async def",
"image: %s\", self.name, err ) self._online = False return self._last_image async def async_get_camera_motion_status(self,",
"PLATFORM_SCHEMA, DEFAULT_CONTENT_TYPE, Camera) from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers import config_validation as cv",
"import ( PLATFORM_SCHEMA, DEFAULT_CONTENT_TYPE, Camera) from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers import config_validation",
"(\\w+)\\s?\\n') # pylint: disable=unused-argument async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): \"\"\"Set up a",
"or (self._last_status is not None and (utcnow() - self._last_status).total_seconds() < 60)): return self._last_image",
"status of the camera.\"\"\" if self._control_url is None: self._motion_detection_active = False return url",
"with async_timeout.timeout(10, loop=self.hass.loop): response = await websession.get(self._snapshot_url) self._last_image = await response.read() if not",
"True if (self._control_url is None or (self._last_status is not None and (utcnow() -"
] |
[
"ylabel=r'ln-returns ($X_t$)') axs[1].legend() # Compute performance metrics SR_b = data['lnReturns'].sum(axis=1).sum() / data['lnReturns'].sum(axis=1).std() print('Sharpe-Ratio:\\n",
"<reponame>LucaCamerani/EcoFin-library \"\"\" benchmarkChart.py Created by <NAME> at 06/02/2021, University of Milano-Bicocca. (<EMAIL>) All",
"Open Source License\". \"\"\" import matplotlib.pyplot as plt import numpy as np import",
"tick in data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick) axs[0].set(ylabel=r'ln-returns ($X_t$)') axs[0].legend(ncol=4) # Plot strategy return vs.",
"# Import data and clean-up source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl') source =",
"axs[0].set_title('Underlying returns') for tick in data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick) axs[0].set(ylabel=r'ln-returns ($X_t$)') axs[0].legend(ncol=4) # Plot",
"is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and is released under the \"BSD Open",
"figsize=(15, 8), sharex=True) fig.suptitle('Benchmark', fontsize=16) # Plot benchmark axs[0].set_title('Underlying returns') for tick in",
"data data[driver] = pd.concat(data[driver], axis=1) # Compute ln-returns table data['lnReturns'] = np.log(data[driver].shift(-1) /",
"from tqdm import tqdm # -------------------------[Set-up]------------------------- ticker_list = [line.rstrip('\\n') for line in open(r'../INDEXs/DJIA.txt')]",
"tqdm # -------------------------[Set-up]------------------------- ticker_list = [line.rstrip('\\n') for line in open(r'../INDEXs/DJIA.txt')] maturity_min = 15",
"# Plot results fig, axs = plt.subplots(2, figsize=(15, 8), sharex=True) fig.suptitle('Benchmark', fontsize=16) #",
"return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark') axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns ($X_t$)') axs[1].legend() # Compute performance metrics SR_b",
"np.log(data[driver].shift(-1) / data[driver]) # Plot results fig, axs = plt.subplots(2, figsize=(15, 8), sharex=True)",
"except: pass # Merge data data[driver] = pd.concat(data[driver], axis=1) # Compute ln-returns table",
"as plt import numpy as np import pandas as pd from tqdm import",
"start_date = 0 driver = 'SpotPrice' # ---------------------------------------------------------- data = {driver: {}} for",
"($t$)', ylabel=r'ln-returns ($X_t$)') axs[1].legend() # Compute performance metrics SR_b = data['lnReturns'].sum(axis=1).sum() / data['lnReturns'].sum(axis=1).std()",
"source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl') source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'],",
"data[driver][tick] = source[driver] except: pass # Merge data data[driver] = pd.concat(data[driver], axis=1) #",
"np import pandas as pd from tqdm import tqdm # -------------------------[Set-up]------------------------- ticker_list =",
"desc='Importing data'): try: # Import data and clean-up source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min),",
"'SpotPrice' # ---------------------------------------------------------- data = {driver: {}} for tick in tqdm(ticker_list, desc='Importing data'):",
"inplace=True) data[driver][tick] = source[driver] except: pass # Merge data data[driver] = pd.concat(data[driver], axis=1)",
"benchmark (portfolio) axs[1].set_title('Portfolio return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark') axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns ($X_t$)') axs[1].legend() # Compute",
"Import data and clean-up source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl') source = source.loc[source['Date']",
"[line.rstrip('\\n') for line in open(r'../INDEXs/DJIA.txt')] maturity_min = 15 base_path = r'../Export/BackTest_C' start_date =",
"/ data[driver]) # Plot results fig, axs = plt.subplots(2, figsize=(15, 8), sharex=True) fig.suptitle('Benchmark',",
"released under the \"BSD Open Source License\". \"\"\" import matplotlib.pyplot as plt import",
"0 driver = 'SpotPrice' # ---------------------------------------------------------- data = {driver: {}} for tick in",
"maturity_min), engine='openpyxl') source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True) data[driver][tick]",
"# -------------------------[Set-up]------------------------- ticker_list = [line.rstrip('\\n') for line in open(r'../INDEXs/DJIA.txt')] maturity_min = 15 base_path",
"axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick) axs[0].set(ylabel=r'ln-returns ($X_t$)') axs[0].legend(ncol=4) # Plot strategy return vs. benchmark (portfolio) axs[1].set_title('Portfolio",
"the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and is released under the \"BSD Open Source License\". \"\"\"",
"= [line.rstrip('\\n') for line in open(r'../INDEXs/DJIA.txt')] maturity_min = 15 base_path = r'../Export/BackTest_C' start_date",
"under the \"BSD Open Source License\". \"\"\" import matplotlib.pyplot as plt import numpy",
"EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and is released under the \"BSD Open Source License\". \"\"\" import",
"for tick in data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick) axs[0].set(ylabel=r'ln-returns ($X_t$)') axs[0].legend(ncol=4) # Plot strategy return",
"of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and is released under the \"BSD Open Source License\".",
"data[driver] = pd.concat(data[driver], axis=1) # Compute ln-returns table data['lnReturns'] = np.log(data[driver].shift(-1) / data[driver])",
"for line in open(r'../INDEXs/DJIA.txt')] maturity_min = 15 base_path = r'../Export/BackTest_C' start_date = 0",
"pass # Merge data data[driver] = pd.concat(data[driver], axis=1) # Compute ln-returns table data['lnReturns']",
"is released under the \"BSD Open Source License\". \"\"\" import matplotlib.pyplot as plt",
"(portfolio) axs[1].set_title('Portfolio return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark') axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns ($X_t$)') axs[1].legend() # Compute performance",
"\"\"\" import matplotlib.pyplot as plt import numpy as np import pandas as pd",
"Compute performance metrics SR_b = data['lnReturns'].sum(axis=1).sum() / data['lnReturns'].sum(axis=1).std() print('Sharpe-Ratio:\\n • Benchmark: {}'.format(SR_b)) plt.show()",
"benchmarkChart.py Created by <NAME> at 06/02/2021, University of Milano-Bicocca. (<EMAIL>) All rights reserved.",
"# Merge data data[driver] = pd.concat(data[driver], axis=1) # Compute ln-returns table data['lnReturns'] =",
"All rights reserved. This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and is",
"axs = plt.subplots(2, figsize=(15, 8), sharex=True) fig.suptitle('Benchmark', fontsize=16) # Plot benchmark axs[0].set_title('Underlying returns')",
"Compute ln-returns table data['lnReturns'] = np.log(data[driver].shift(-1) / data[driver]) # Plot results fig, axs",
"tick in tqdm(ticker_list, desc='Importing data'): try: # Import data and clean-up source =",
"axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns ($X_t$)') axs[1].legend() # Compute performance metrics SR_b = data['lnReturns'].sum(axis=1).sum() /",
"plt.subplots(2, figsize=(15, 8), sharex=True) fig.suptitle('Benchmark', fontsize=16) # Plot benchmark axs[0].set_title('Underlying returns') for tick",
"{}} for tick in tqdm(ticker_list, desc='Importing data'): try: # Import data and clean-up",
"vs. benchmark (portfolio) axs[1].set_title('Portfolio return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark') axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns ($X_t$)') axs[1].legend() #",
"# Compute performance metrics SR_b = data['lnReturns'].sum(axis=1).sum() / data['lnReturns'].sum(axis=1).std() print('Sharpe-Ratio:\\n • Benchmark: {}'.format(SR_b))",
"axs[0].set(ylabel=r'ln-returns ($X_t$)') axs[0].legend(ncol=4) # Plot strategy return vs. benchmark (portfolio) axs[1].set_title('Portfolio return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)),",
"= 0 driver = 'SpotPrice' # ---------------------------------------------------------- data = {driver: {}} for tick",
"= pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl') source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'),",
"(https://github.com/LucaCamerani/EcoFin-Library), and is released under the \"BSD Open Source License\". \"\"\" import matplotlib.pyplot",
"engine='openpyxl') source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True) data[driver][tick] =",
"\"\"\" benchmarkChart.py Created by <NAME> at 06/02/2021, University of Milano-Bicocca. (<EMAIL>) All rights",
"and clean-up source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl') source = source.loc[source['Date'] >= start_date,",
"8), sharex=True) fig.suptitle('Benchmark', fontsize=16) # Plot benchmark axs[0].set_title('Underlying returns') for tick in data[driver].keys():",
"Merge data data[driver] = pd.concat(data[driver], axis=1) # Compute ln-returns table data['lnReturns'] = np.log(data[driver].shift(-1)",
"as pd from tqdm import tqdm # -------------------------[Set-up]------------------------- ticker_list = [line.rstrip('\\n') for line",
"axs[1].set_title('Portfolio return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark') axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns ($X_t$)') axs[1].legend() # Compute performance metrics",
"sharex=True) fig.suptitle('Benchmark', fontsize=16) # Plot benchmark axs[0].set_title('Underlying returns') for tick in data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]),",
"for tick in tqdm(ticker_list, desc='Importing data'): try: # Import data and clean-up source",
"15 base_path = r'../Export/BackTest_C' start_date = 0 driver = 'SpotPrice' # ---------------------------------------------------------- data",
"Created by <NAME> at 06/02/2021, University of Milano-Bicocca. (<EMAIL>) All rights reserved. This",
"driver = 'SpotPrice' # ---------------------------------------------------------- data = {driver: {}} for tick in tqdm(ticker_list,",
"matplotlib.pyplot as plt import numpy as np import pandas as pd from tqdm",
"numpy as np import pandas as pd from tqdm import tqdm # -------------------------[Set-up]-------------------------",
"in open(r'../INDEXs/DJIA.txt')] maturity_min = 15 base_path = r'../Export/BackTest_C' start_date = 0 driver =",
"= source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True) data[driver][tick] = source[driver] except:",
"in tqdm(ticker_list, desc='Importing data'): try: # Import data and clean-up source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path,",
"ln-returns table data['lnReturns'] = np.log(data[driver].shift(-1) / data[driver]) # Plot results fig, axs =",
"= 15 base_path = r'../Export/BackTest_C' start_date = 0 driver = 'SpotPrice' # ----------------------------------------------------------",
"fig.suptitle('Benchmark', fontsize=16) # Plot benchmark axs[0].set_title('Underlying returns') for tick in data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick)",
"r'../Export/BackTest_C' start_date = 0 driver = 'SpotPrice' # ---------------------------------------------------------- data = {driver: {}}",
"source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True) data[driver][tick] = source[driver] except: pass # Merge data data[driver]",
"# Compute ln-returns table data['lnReturns'] = np.log(data[driver].shift(-1) / data[driver]) # Plot results fig,",
"import tqdm # -------------------------[Set-up]------------------------- ticker_list = [line.rstrip('\\n') for line in open(r'../INDEXs/DJIA.txt')] maturity_min =",
"~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True) data[driver][tick] = source[driver] except: pass # Merge data",
"pd from tqdm import tqdm # -------------------------[Set-up]------------------------- ticker_list = [line.rstrip('\\n') for line in",
"tqdm(ticker_list, desc='Importing data'): try: # Import data and clean-up source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick,",
"= np.log(data[driver].shift(-1) / data[driver]) # Plot results fig, axs = plt.subplots(2, figsize=(15, 8),",
"---------------------------------------------------------- data = {driver: {}} for tick in tqdm(ticker_list, desc='Importing data'): try: #",
"<NAME> at 06/02/2021, University of Milano-Bicocca. (<EMAIL>) All rights reserved. This file is",
"rights reserved. This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and is released",
"import pandas as pd from tqdm import tqdm # -------------------------[Set-up]------------------------- ticker_list = [line.rstrip('\\n')",
"table data['lnReturns'] = np.log(data[driver].shift(-1) / data[driver]) # Plot results fig, axs = plt.subplots(2,",
"strategy return vs. benchmark (portfolio) axs[1].set_title('Portfolio return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark') axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns ($X_t$)')",
"($X_t$)') axs[1].legend() # Compute performance metrics SR_b = data['lnReturns'].sum(axis=1).sum() / data['lnReturns'].sum(axis=1).std() print('Sharpe-Ratio:\\n •",
"try: # Import data and clean-up source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl') source",
"\"BSD Open Source License\". \"\"\" import matplotlib.pyplot as plt import numpy as np",
"(<EMAIL>) All rights reserved. This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and",
"import numpy as np import pandas as pd from tqdm import tqdm #",
"data = {driver: {}} for tick in tqdm(ticker_list, desc='Importing data'): try: # Import",
"= pd.concat(data[driver], axis=1) # Compute ln-returns table data['lnReturns'] = np.log(data[driver].shift(-1) / data[driver]) #",
">= start_date, ~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True) data[driver][tick] = source[driver] except: pass #",
"Plot results fig, axs = plt.subplots(2, figsize=(15, 8), sharex=True) fig.suptitle('Benchmark', fontsize=16) # Plot",
"line in open(r'../INDEXs/DJIA.txt')] maturity_min = 15 base_path = r'../Export/BackTest_C' start_date = 0 driver",
"label=tick) axs[0].set(ylabel=r'ln-returns ($X_t$)') axs[0].legend(ncol=4) # Plot strategy return vs. benchmark (portfolio) axs[1].set_title('Portfolio return')",
"Plot strategy return vs. benchmark (portfolio) axs[1].set_title('Portfolio return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark') axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns",
"pd.concat(data[driver], axis=1) # Compute ln-returns table data['lnReturns'] = np.log(data[driver].shift(-1) / data[driver]) # Plot",
"Milano-Bicocca. (<EMAIL>) All rights reserved. This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),",
"fontsize=16) # Plot benchmark axs[0].set_title('Underlying returns') for tick in data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick) axs[0].set(ylabel=r'ln-returns",
"results fig, axs = plt.subplots(2, figsize=(15, 8), sharex=True) fig.suptitle('Benchmark', fontsize=16) # Plot benchmark",
"License\". \"\"\" import matplotlib.pyplot as plt import numpy as np import pandas as",
"data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick) axs[0].set(ylabel=r'ln-returns ($X_t$)') axs[0].legend(ncol=4) # Plot strategy return vs. benchmark (portfolio)",
"by <NAME> at 06/02/2021, University of Milano-Bicocca. (<EMAIL>) All rights reserved. This file",
"source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True) data[driver][tick] = source[driver] except: pass",
"tick, maturity_min), engine='openpyxl') source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True)",
"{driver: {}} for tick in tqdm(ticker_list, desc='Importing data'): try: # Import data and",
"# Plot strategy return vs. benchmark (portfolio) axs[1].set_title('Portfolio return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark') axs[1].set(xlabel=r'Time ($t$)',",
"06/02/2021, University of Milano-Bicocca. (<EMAIL>) All rights reserved. This file is part of",
"part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and is released under the \"BSD Open Source",
"source[driver] except: pass # Merge data data[driver] = pd.concat(data[driver], axis=1) # Compute ln-returns",
"axis=1) # Compute ln-returns table data['lnReturns'] = np.log(data[driver].shift(-1) / data[driver]) # Plot results",
"= r'../Export/BackTest_C' start_date = 0 driver = 'SpotPrice' # ---------------------------------------------------------- data = {driver:",
"and is released under the \"BSD Open Source License\". \"\"\" import matplotlib.pyplot as",
"axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark') axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns ($X_t$)') axs[1].legend() # Compute performance metrics SR_b =",
"This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and is released under the",
"format='%Y%m%d'), drop=True, inplace=True) data[driver][tick] = source[driver] except: pass # Merge data data[driver] =",
"of Milano-Bicocca. (<EMAIL>) All rights reserved. This file is part of the EcoFin-Library",
"import matplotlib.pyplot as plt import numpy as np import pandas as pd from",
"data['lnReturns'] = np.log(data[driver].shift(-1) / data[driver]) # Plot results fig, axs = plt.subplots(2, figsize=(15,",
"open(r'../INDEXs/DJIA.txt')] maturity_min = 15 base_path = r'../Export/BackTest_C' start_date = 0 driver = 'SpotPrice'",
"axs[0].legend(ncol=4) # Plot strategy return vs. benchmark (portfolio) axs[1].set_title('Portfolio return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark') axs[1].set(xlabel=r'Time",
"data[driver]) # Plot results fig, axs = plt.subplots(2, figsize=(15, 8), sharex=True) fig.suptitle('Benchmark', fontsize=16)",
"start_date, ~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True) data[driver][tick] = source[driver] except: pass # Merge",
"returns') for tick in data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick) axs[0].set(ylabel=r'ln-returns ($X_t$)') axs[0].legend(ncol=4) # Plot strategy",
"pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl') source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True,",
"reserved. This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and is released under",
"Plot benchmark axs[0].set_title('Underlying returns') for tick in data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick) axs[0].set(ylabel=r'ln-returns ($X_t$)') axs[0].legend(ncol=4)",
"($X_t$)') axs[0].legend(ncol=4) # Plot strategy return vs. benchmark (portfolio) axs[1].set_title('Portfolio return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark')",
"maturity_min = 15 base_path = r'../Export/BackTest_C' start_date = 0 driver = 'SpotPrice' #",
"# ---------------------------------------------------------- data = {driver: {}} for tick in tqdm(ticker_list, desc='Importing data'): try:",
"= 'SpotPrice' # ---------------------------------------------------------- data = {driver: {}} for tick in tqdm(ticker_list, desc='Importing",
"drop=True, inplace=True) data[driver][tick] = source[driver] except: pass # Merge data data[driver] = pd.concat(data[driver],",
"ticker_list = [line.rstrip('\\n') for line in open(r'../INDEXs/DJIA.txt')] maturity_min = 15 base_path = r'../Export/BackTest_C'",
"tqdm import tqdm # -------------------------[Set-up]------------------------- ticker_list = [line.rstrip('\\n') for line in open(r'../INDEXs/DJIA.txt')] maturity_min",
"= source[driver] except: pass # Merge data data[driver] = pd.concat(data[driver], axis=1) # Compute",
"# Plot benchmark axs[0].set_title('Underlying returns') for tick in data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick) axs[0].set(ylabel=r'ln-returns ($X_t$)')",
"-------------------------[Set-up]------------------------- ticker_list = [line.rstrip('\\n') for line in open(r'../INDEXs/DJIA.txt')] maturity_min = 15 base_path =",
"data'): try: # Import data and clean-up source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl')",
"pandas as pd from tqdm import tqdm # -------------------------[Set-up]------------------------- ticker_list = [line.rstrip('\\n') for",
"clean-up source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl') source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')]",
"= plt.subplots(2, figsize=(15, 8), sharex=True) fig.suptitle('Benchmark', fontsize=16) # Plot benchmark axs[0].set_title('Underlying returns') for",
"benchmark axs[0].set_title('Underlying returns') for tick in data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick) axs[0].set(ylabel=r'ln-returns ($X_t$)') axs[0].legend(ncol=4) #",
"University of Milano-Bicocca. (<EMAIL>) All rights reserved. This file is part of the",
"= {driver: {}} for tick in tqdm(ticker_list, desc='Importing data'): try: # Import data",
"the \"BSD Open Source License\". \"\"\" import matplotlib.pyplot as plt import numpy as",
"label='Benchmark') axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns ($X_t$)') axs[1].legend() # Compute performance metrics SR_b = data['lnReturns'].sum(axis=1).sum()",
"return vs. benchmark (portfolio) axs[1].set_title('Portfolio return') axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark') axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns ($X_t$)') axs[1].legend()",
"as np import pandas as pd from tqdm import tqdm # -------------------------[Set-up]------------------------- ticker_list",
"at 06/02/2021, University of Milano-Bicocca. (<EMAIL>) All rights reserved. This file is part",
"base_path = r'../Export/BackTest_C' start_date = 0 driver = 'SpotPrice' # ---------------------------------------------------------- data =",
"axs[1].legend() # Compute performance metrics SR_b = data['lnReturns'].sum(axis=1).sum() / data['lnReturns'].sum(axis=1).std() print('Sharpe-Ratio:\\n • Benchmark:",
"plt import numpy as np import pandas as pd from tqdm import tqdm",
"data and clean-up source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl') source = source.loc[source['Date'] >=",
"in data[driver].keys(): axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick) axs[0].set(ylabel=r'ln-returns ($X_t$)') axs[0].legend(ncol=4) # Plot strategy return vs. benchmark",
"source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')] source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True) data[driver][tick] = source[driver]",
"Source License\". \"\"\" import matplotlib.pyplot as plt import numpy as np import pandas",
"file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and is released under the \"BSD",
"fig, axs = plt.subplots(2, figsize=(15, 8), sharex=True) fig.suptitle('Benchmark', fontsize=16) # Plot benchmark axs[0].set_title('Underlying"
] |
[
"= self.group_id.to_alipay_dict() else: params['group_id'] = self.group_id if self.isv_name: if hasattr(self.isv_name, 'to_alipay_dict'): params['isv_name'] =",
"status(self): return self._status @status.setter def status(self, value): self._status = value def to_alipay_dict(self): params",
class SceneConfigQueryDTO(object):
    """Query DTO for an Alipay scene configuration.

    Plain data-transfer object: every field is exposed through a property
    backed by a private attribute, and the object converts to/from the
    dict shape used by the Alipay OpenAPI gateway via ``to_alipay_dict``
    and ``from_alipay_dict``.
    """

    def __init__(self):
        # All fields default to None; unset fields are omitted from the
        # serialized dict (to_alipay_dict only emits truthy values).
        self._business_scene = None   # list of scene identifiers
        self._group_id = None
        self._isv_name = None
        self._pid = None
        self._school_id = None
        self._school_name = None
        self._school_std_code = None
        self._sign_app_id = None
        self._status = None

    @property
    def business_scene(self):
        return self._business_scene

    @business_scene.setter
    def business_scene(self, value):
        # NOTE: only list values are accepted; anything else is silently
        # ignored and the current value is kept (matches the generated
        # SDK convention for list-typed fields).
        if isinstance(value, list):
            self._business_scene = list()
            for item in value:
                self._business_scene.append(item)

    @property
    def group_id(self):
        return self._group_id

    @group_id.setter
    def group_id(self, value):
        self._group_id = value

    @property
    def isv_name(self):
        return self._isv_name

    @isv_name.setter
    def isv_name(self, value):
        self._isv_name = value

    @property
    def pid(self):
        return self._pid

    @pid.setter
    def pid(self, value):
        self._pid = value

    @property
    def school_id(self):
        return self._school_id

    @school_id.setter
    def school_id(self, value):
        self._school_id = value

    @property
    def school_name(self):
        return self._school_name

    @school_name.setter
    def school_name(self, value):
        self._school_name = value

    @property
    def school_std_code(self):
        return self._school_std_code

    @school_std_code.setter
    def school_std_code(self, value):
        self._school_std_code = value

    @property
    def sign_app_id(self):
        return self._sign_app_id

    @sign_app_id.setter
    def sign_app_id(self, value):
        self._sign_app_id = value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, skipping unset (falsy) fields.

        Nested model objects are flattened via their own
        ``to_alipay_dict`` when they provide one; list elements of
        ``business_scene`` are converted in place first.
        """
        def _convert(value):
            # Flatten nested DTOs; pass plain values through unchanged.
            return value.to_alipay_dict() if hasattr(value, 'to_alipay_dict') else value

        params = dict()
        if self.business_scene:
            if isinstance(self.business_scene, list):
                # Convert any model elements in place before emitting.
                for idx in range(0, len(self.business_scene)):
                    element = self.business_scene[idx]
                    if hasattr(element, 'to_alipay_dict'):
                        self.business_scene[idx] = element.to_alipay_dict()
            params['business_scene'] = _convert(self.business_scene)
        if self.group_id:
            params['group_id'] = _convert(self.group_id)
        if self.isv_name:
            params['isv_name'] = _convert(self.isv_name)
        if self.pid:
            params['pid'] = _convert(self.pid)
        if self.school_id:
            params['school_id'] = _convert(self.school_id)
        if self.school_name:
            params['school_name'] = _convert(self.school_name)
        if self.school_std_code:
            params['school_std_code'] = _convert(self.school_std_code)
        if self.sign_app_id:
            params['sign_app_id'] = _convert(self.sign_app_id)
        if self.status:
            params['status'] = _convert(self.status)
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a ``SceneConfigQueryDTO`` from a response dict.

        Returns ``None`` for an empty/missing dict. Assignment goes
        through the property setters, so the list-only guard on
        ``business_scene`` still applies.
        """
        if not d:
            return None
        o = SceneConfigQueryDTO()
        for key in ('business_scene', 'group_id', 'isv_name', 'pid',
                    'school_id', 'school_name', 'school_std_code',
                    'sign_app_id', 'status'):
            if key in d:
                setattr(o, key, d[key])
        return o
"if 'school_std_code' in d: o.school_std_code = d['school_std_code'] if 'sign_app_id' in d: o.sign_app_id =",
"d: o.school_std_code = d['school_std_code'] if 'sign_app_id' in d: o.sign_app_id = d['sign_app_id'] if 'status'",
"self._school_id = None self._school_name = None self._school_std_code = None self._sign_app_id = None self._status",
"else: params['status'] = self.status return params @staticmethod def from_alipay_dict(d): if not d: return",
"def school_name(self, value): self._school_name = value @property def school_std_code(self): return self._school_std_code @school_std_code.setter def",
"def business_scene(self): return self._business_scene @business_scene.setter def business_scene(self, value): if isinstance(value, list): self._business_scene =",
"= value @property def isv_name(self): return self._isv_name @isv_name.setter def isv_name(self, value): self._isv_name =",
"def __init__(self): self._business_scene = None self._group_id = None self._isv_name = None self._pid =",
"'sign_app_id' in d: o.sign_app_id = d['sign_app_id'] if 'status' in d: o.status = d['status']",
"len(self.business_scene)): element = self.business_scene[i] if hasattr(element, 'to_alipay_dict'): self.business_scene[i] = element.to_alipay_dict() if hasattr(self.business_scene, 'to_alipay_dict'):",
"= None self._school_id = None self._school_name = None self._school_std_code = None self._sign_app_id =",
"hasattr(self.pid, 'to_alipay_dict'): params['pid'] = self.pid.to_alipay_dict() else: params['pid'] = self.pid if self.school_id: if hasattr(self.school_id,",
"self.pid if self.school_id: if hasattr(self.school_id, 'to_alipay_dict'): params['school_id'] = self.school_id.to_alipay_dict() else: params['school_id'] = self.school_id",
"return None o = SceneConfigQueryDTO() if 'business_scene' in d: o.business_scene = d['business_scene'] if",
"def status(self): return self._status @status.setter def status(self, value): self._status = value def to_alipay_dict(self):",
"if self.school_id: if hasattr(self.school_id, 'to_alipay_dict'): params['school_id'] = self.school_id.to_alipay_dict() else: params['school_id'] = self.school_id if",
"'to_alipay_dict'): self.business_scene[i] = element.to_alipay_dict() if hasattr(self.business_scene, 'to_alipay_dict'): params['business_scene'] = self.business_scene.to_alipay_dict() else: params['business_scene'] =",
"else: params['school_std_code'] = self.school_std_code if self.sign_app_id: if hasattr(self.sign_app_id, 'to_alipay_dict'): params['sign_app_id'] = self.sign_app_id.to_alipay_dict() else:",
"if self.sign_app_id: if hasattr(self.sign_app_id, 'to_alipay_dict'): params['sign_app_id'] = self.sign_app_id.to_alipay_dict() else: params['sign_app_id'] = self.sign_app_id if",
"-*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * class SceneConfigQueryDTO(object): def",
"school_name(self, value): self._school_name = value @property def school_std_code(self): return self._school_std_code @school_std_code.setter def school_std_code(self,",
"isinstance(value, list): self._business_scene = list() for i in value: self._business_scene.append(i) @property def group_id(self):",
"value @property def school_std_code(self): return self._school_std_code @school_std_code.setter def school_std_code(self, value): self._school_std_code = value",
"if self.group_id: if hasattr(self.group_id, 'to_alipay_dict'): params['group_id'] = self.group_id.to_alipay_dict() else: params['group_id'] = self.group_id if",
"= self.business_scene if self.group_id: if hasattr(self.group_id, 'to_alipay_dict'): params['group_id'] = self.group_id.to_alipay_dict() else: params['group_id'] =",
"self.group_id.to_alipay_dict() else: params['group_id'] = self.group_id if self.isv_name: if hasattr(self.isv_name, 'to_alipay_dict'): params['isv_name'] = self.isv_name.to_alipay_dict()",
"self._pid = value @property def school_id(self): return self._school_id @school_id.setter def school_id(self, value): self._school_id",
"= None self._isv_name = None self._pid = None self._school_id = None self._school_name =",
"i in value: self._business_scene.append(i) @property def group_id(self): return self._group_id @group_id.setter def group_id(self, value):",
"hasattr(self.school_name, 'to_alipay_dict'): params['school_name'] = self.school_name.to_alipay_dict() else: params['school_name'] = self.school_name if self.school_std_code: if hasattr(self.school_std_code,",
"d: o.business_scene = d['business_scene'] if 'group_id' in d: o.group_id = d['group_id'] if 'isv_name'",
"'isv_name' in d: o.isv_name = d['isv_name'] if 'pid' in d: o.pid = d['pid']",
"'school_std_code' in d: o.school_std_code = d['school_std_code'] if 'sign_app_id' in d: o.sign_app_id = d['sign_app_id']",
"params['status'] = self.status return params @staticmethod def from_alipay_dict(d): if not d: return None",
"value @property def school_id(self): return self._school_id @school_id.setter def school_id(self, value): self._school_id = value",
"from alipay.aop.api.constant.ParamConstants import * class SceneConfigQueryDTO(object): def __init__(self): self._business_scene = None self._group_id =",
"= self.business_scene[i] if hasattr(element, 'to_alipay_dict'): self.business_scene[i] = element.to_alipay_dict() if hasattr(self.business_scene, 'to_alipay_dict'): params['business_scene'] =",
"* class SceneConfigQueryDTO(object): def __init__(self): self._business_scene = None self._group_id = None self._isv_name =",
"def school_id(self, value): self._school_id = value @property def school_name(self): return self._school_name @school_name.setter def",
"@school_id.setter def school_id(self, value): self._school_id = value @property def school_name(self): return self._school_name @school_name.setter",
"else: params['sign_app_id'] = self.sign_app_id if self.status: if hasattr(self.status, 'to_alipay_dict'): params['status'] = self.status.to_alipay_dict() else:",
"self.school_id if self.school_name: if hasattr(self.school_name, 'to_alipay_dict'): params['school_name'] = self.school_name.to_alipay_dict() else: params['school_name'] = self.school_name",
"-*- import json from alipay.aop.api.constant.ParamConstants import * class SceneConfigQueryDTO(object): def __init__(self): self._business_scene =",
"list() for i in value: self._business_scene.append(i) @property def group_id(self): return self._group_id @group_id.setter def",
"o = SceneConfigQueryDTO() if 'business_scene' in d: o.business_scene = d['business_scene'] if 'group_id' in",
"@property def school_name(self): return self._school_name @school_name.setter def school_name(self, value): self._school_name = value @property",
"def business_scene(self, value): if isinstance(value, list): self._business_scene = list() for i in value:",
"= self.pid.to_alipay_dict() else: params['pid'] = self.pid if self.school_id: if hasattr(self.school_id, 'to_alipay_dict'): params['school_id'] =",
"@school_name.setter def school_name(self, value): self._school_name = value @property def school_std_code(self): return self._school_std_code @school_std_code.setter",
"= self.school_name if self.school_std_code: if hasattr(self.school_std_code, 'to_alipay_dict'): params['school_std_code'] = self.school_std_code.to_alipay_dict() else: params['school_std_code'] =",
"for i in value: self._business_scene.append(i) @property def group_id(self): return self._group_id @group_id.setter def group_id(self,",
"d['isv_name'] if 'pid' in d: o.pid = d['pid'] if 'school_id' in d: o.school_id",
"self._group_id = value @property def isv_name(self): return self._isv_name @isv_name.setter def isv_name(self, value): self._isv_name",
"return self._sign_app_id @sign_app_id.setter def sign_app_id(self, value): self._sign_app_id = value @property def status(self): return",
"self._school_std_code = value @property def sign_app_id(self): return self._sign_app_id @sign_app_id.setter def sign_app_id(self, value): self._sign_app_id",
"self.business_scene.to_alipay_dict() else: params['business_scene'] = self.business_scene if self.group_id: if hasattr(self.group_id, 'to_alipay_dict'): params['group_id'] = self.group_id.to_alipay_dict()",
"if hasattr(self.school_std_code, 'to_alipay_dict'): params['school_std_code'] = self.school_std_code.to_alipay_dict() else: params['school_std_code'] = self.school_std_code if self.sign_app_id: if",
"hasattr(element, 'to_alipay_dict'): self.business_scene[i] = element.to_alipay_dict() if hasattr(self.business_scene, 'to_alipay_dict'): params['business_scene'] = self.business_scene.to_alipay_dict() else: params['business_scene']",
"params['school_std_code'] = self.school_std_code if self.sign_app_id: if hasattr(self.sign_app_id, 'to_alipay_dict'): params['sign_app_id'] = self.sign_app_id.to_alipay_dict() else: params['sign_app_id']",
"@property def group_id(self): return self._group_id @group_id.setter def group_id(self, value): self._group_id = value @property",
"isv_name(self, value): self._isv_name = value @property def pid(self): return self._pid @pid.setter def pid(self,",
"if self.school_name: if hasattr(self.school_name, 'to_alipay_dict'): params['school_name'] = self.school_name.to_alipay_dict() else: params['school_name'] = self.school_name if",
"in d: o.group_id = d['group_id'] if 'isv_name' in d: o.isv_name = d['isv_name'] if",
"school_id(self, value): self._school_id = value @property def school_name(self): return self._school_name @school_name.setter def school_name(self,",
"= self.status.to_alipay_dict() else: params['status'] = self.status return params @staticmethod def from_alipay_dict(d): if not",
"if hasattr(self.school_name, 'to_alipay_dict'): params['school_name'] = self.school_name.to_alipay_dict() else: params['school_name'] = self.school_name if self.school_std_code: if",
"self.sign_app_id: if hasattr(self.sign_app_id, 'to_alipay_dict'): params['sign_app_id'] = self.sign_app_id.to_alipay_dict() else: params['sign_app_id'] = self.sign_app_id if self.status:",
"hasattr(self.group_id, 'to_alipay_dict'): params['group_id'] = self.group_id.to_alipay_dict() else: params['group_id'] = self.group_id if self.isv_name: if hasattr(self.isv_name,",
"= value @property def school_name(self): return self._school_name @school_name.setter def school_name(self, value): self._school_name =",
"d: return None o = SceneConfigQueryDTO() if 'business_scene' in d: o.business_scene = d['business_scene']",
"params['pid'] = self.pid if self.school_id: if hasattr(self.school_id, 'to_alipay_dict'): params['school_id'] = self.school_id.to_alipay_dict() else: params['school_id']",
"= None self._pid = None self._school_id = None self._school_name = None self._school_std_code =",
"school_std_code(self, value): self._school_std_code = value @property def sign_app_id(self): return self._sign_app_id @sign_app_id.setter def sign_app_id(self,",
"def status(self, value): self._status = value def to_alipay_dict(self): params = dict() if self.business_scene:",
"d: o.school_name = d['school_name'] if 'school_std_code' in d: o.school_std_code = d['school_std_code'] if 'sign_app_id'",
"'school_name' in d: o.school_name = d['school_name'] if 'school_std_code' in d: o.school_std_code = d['school_std_code']",
"return self._status @status.setter def status(self, value): self._status = value def to_alipay_dict(self): params =",
"self._school_name = None self._school_std_code = None self._sign_app_id = None self._status = None @property",
"None self._school_name = None self._school_std_code = None self._sign_app_id = None self._status = None",
"= None self._sign_app_id = None self._status = None @property def business_scene(self): return self._business_scene",
"hasattr(self.school_id, 'to_alipay_dict'): params['school_id'] = self.school_id.to_alipay_dict() else: params['school_id'] = self.school_id if self.school_name: if hasattr(self.school_name,",
"self._sign_app_id = None self._status = None @property def business_scene(self): return self._business_scene @business_scene.setter def",
"'pid' in d: o.pid = d['pid'] if 'school_id' in d: o.school_id = d['school_id']",
"to_alipay_dict(self): params = dict() if self.business_scene: if isinstance(self.business_scene, list): for i in range(0,",
"d['school_name'] if 'school_std_code' in d: o.school_std_code = d['school_std_code'] if 'sign_app_id' in d: o.sign_app_id",
"value): self._pid = value @property def school_id(self): return self._school_id @school_id.setter def school_id(self, value):",
"hasattr(self.sign_app_id, 'to_alipay_dict'): params['sign_app_id'] = self.sign_app_id.to_alipay_dict() else: params['sign_app_id'] = self.sign_app_id if self.status: if hasattr(self.status,",
"self._pid = None self._school_id = None self._school_name = None self._school_std_code = None self._sign_app_id",
"= value @property def school_id(self): return self._school_id @school_id.setter def school_id(self, value): self._school_id =",
"if isinstance(self.business_scene, list): for i in range(0, len(self.business_scene)): element = self.business_scene[i] if hasattr(element,",
"params['school_name'] = self.school_name if self.school_std_code: if hasattr(self.school_std_code, 'to_alipay_dict'): params['school_std_code'] = self.school_std_code.to_alipay_dict() else: params['school_std_code']",
"@isv_name.setter def isv_name(self, value): self._isv_name = value @property def pid(self): return self._pid @pid.setter",
"@property def isv_name(self): return self._isv_name @isv_name.setter def isv_name(self, value): self._isv_name = value @property",
"value): self._status = value def to_alipay_dict(self): params = dict() if self.business_scene: if isinstance(self.business_scene,",
"o.business_scene = d['business_scene'] if 'group_id' in d: o.group_id = d['group_id'] if 'isv_name' in",
"params['isv_name'] = self.isv_name if self.pid: if hasattr(self.pid, 'to_alipay_dict'): params['pid'] = self.pid.to_alipay_dict() else: params['pid']",
"= self.school_std_code.to_alipay_dict() else: params['school_std_code'] = self.school_std_code if self.sign_app_id: if hasattr(self.sign_app_id, 'to_alipay_dict'): params['sign_app_id'] ="
] |
[
". import TestController, TestParameterController from threading import Lock TEST_PARAMETER_LOCK = Lock() api.add_resource(TestController, '/api/tests')",
"threading import Lock TEST_PARAMETER_LOCK = Lock() api.add_resource(TestController, '/api/tests') api.add_resource(TestParameterController, '/api/parameters', resource_class_kwargs ={'lock_obj': TEST_PARAMETER_LOCK})",
"from . import TestController, TestParameterController from threading import Lock TEST_PARAMETER_LOCK = Lock() api.add_resource(TestController,",
"from . import app, api from . import TestController, TestParameterController from threading import",
"from threading import Lock TEST_PARAMETER_LOCK = Lock() api.add_resource(TestController, '/api/tests') api.add_resource(TestParameterController, '/api/parameters', resource_class_kwargs ={'lock_obj':",
"import TestController, TestParameterController from threading import Lock TEST_PARAMETER_LOCK = Lock() api.add_resource(TestController, '/api/tests') api.add_resource(TestParameterController,",
". import app, api from . import TestController, TestParameterController from threading import Lock",
"TestParameterController from threading import Lock TEST_PARAMETER_LOCK = Lock() api.add_resource(TestController, '/api/tests') api.add_resource(TestParameterController, '/api/parameters', resource_class_kwargs",
"TestController, TestParameterController from threading import Lock TEST_PARAMETER_LOCK = Lock() api.add_resource(TestController, '/api/tests') api.add_resource(TestParameterController, '/api/parameters',",
"api from . import TestController, TestParameterController from threading import Lock TEST_PARAMETER_LOCK = Lock()",
"app, api from . import TestController, TestParameterController from threading import Lock TEST_PARAMETER_LOCK =",
"import app, api from . import TestController, TestParameterController from threading import Lock TEST_PARAMETER_LOCK"
] |
[
"logger.error(err) pass extras[\"list_extras\"] = lambda: click.echo( json.dumps({k: str(v) for k, v in extras.items()",
"for d in network_deployments if d[\"contract_type\"] == \"TestContract\" ] latest_address = deployments[-1][\"address\"] extras[\"test_contract_address\"]",
"accounts.test_accounts[2:]: extras[f\"acct{index}\"] = acct index += 1 except Exception as err: logger.error(err) pass",
"ecosystem_deployments = config.deployments[ecosystem] if network in ecosystem_deployments: network_deployments = ecosystem[network] deployments = [",
"str(v) for k, v in extras.items() if k != \"list_extras\"}, indent=2) ) return",
"import click from ape.logging import logger def ape_init_extras(accounts, project, config, networks): ecosystem =",
"from ape.logging import logger def ape_init_extras(accounts, project, config, networks): ecosystem = networks.provider.network.ecosystem.name network",
"json.dumps({k: str(v) for k, v in extras.items() if k != \"list_extras\"}, indent=2) )",
"as err: logger.error(err) pass extras[\"list_extras\"] = lambda: click.echo( json.dumps({k: str(v) for k, v",
"= config.deployments[ecosystem] if network in ecosystem_deployments: network_deployments = ecosystem[network] deployments = [ d",
"except Exception as err: logger.error(err) pass extras[\"list_extras\"] = lambda: click.echo( json.dumps({k: str(v) for",
"for acct in accounts.test_accounts[2:]: extras[f\"acct{index}\"] = acct index += 1 except Exception as",
"Add remaining accounts index = 2 for acct in accounts.test_accounts[2:]: extras[f\"acct{index}\"] = acct",
"= deployments[-1][\"address\"] extras[\"test_contract_address\"] = latest_address # Mimic fixtures owner = accounts.test_accounts[0] logger.info(f\"Deploying {project.FundMe}",
"extras = {} try: if ecosystem in config.deployments: ecosystem_deployments = config.deployments[ecosystem] if network",
"1 except Exception as err: logger.error(err) pass extras[\"list_extras\"] = lambda: click.echo( json.dumps({k: str(v)",
"contract, **extras, } # Add remaining accounts index = 2 for acct in",
"== \"TestContract\" ] latest_address = deployments[-1][\"address\"] extras[\"test_contract_address\"] = latest_address # Mimic fixtures owner",
"extras = { \"owner\": owner, \"sender\": accounts.test_accounts[1], \"fund_me\": contract, \"contract\": contract, **extras, }",
"if d[\"contract_type\"] == \"TestContract\" ] latest_address = deployments[-1][\"address\"] extras[\"test_contract_address\"] = latest_address # Mimic",
"= { \"owner\": owner, \"sender\": accounts.test_accounts[1], \"fund_me\": contract, \"contract\": contract, **extras, } #",
"] latest_address = deployments[-1][\"address\"] extras[\"test_contract_address\"] = latest_address # Mimic fixtures owner = accounts.test_accounts[0]",
"Exception as err: logger.error(err) pass extras[\"list_extras\"] = lambda: click.echo( json.dumps({k: str(v) for k,",
"if ecosystem in config.deployments: ecosystem_deployments = config.deployments[ecosystem] if network in ecosystem_deployments: network_deployments =",
"\"owner\": owner, \"sender\": accounts.test_accounts[1], \"fund_me\": contract, \"contract\": contract, **extras, } # Add remaining",
"networks.provider.network.ecosystem.name network = networks.provider.network.name extras = {} try: if ecosystem in config.deployments: ecosystem_deployments",
"extras[\"test_contract_address\"] = latest_address # Mimic fixtures owner = accounts.test_accounts[0] logger.info(f\"Deploying {project.FundMe} in ape_console_extras.py\")",
"latest_address # Mimic fixtures owner = accounts.test_accounts[0] logger.info(f\"Deploying {project.FundMe} in ape_console_extras.py\") contract =",
"Mimic fixtures owner = accounts.test_accounts[0] logger.info(f\"Deploying {project.FundMe} in ape_console_extras.py\") contract = project.FundMe.deploy(sender=owner) extras",
"d[\"contract_type\"] == \"TestContract\" ] latest_address = deployments[-1][\"address\"] extras[\"test_contract_address\"] = latest_address # Mimic fixtures",
"d in network_deployments if d[\"contract_type\"] == \"TestContract\" ] latest_address = deployments[-1][\"address\"] extras[\"test_contract_address\"] =",
"ecosystem_deployments: network_deployments = ecosystem[network] deployments = [ d for d in network_deployments if",
"= acct index += 1 except Exception as err: logger.error(err) pass extras[\"list_extras\"] =",
"= {} try: if ecosystem in config.deployments: ecosystem_deployments = config.deployments[ecosystem] if network in",
"{project.FundMe} in ape_console_extras.py\") contract = project.FundMe.deploy(sender=owner) extras = { \"owner\": owner, \"sender\": accounts.test_accounts[1],",
"2 for acct in accounts.test_accounts[2:]: extras[f\"acct{index}\"] = acct index += 1 except Exception",
"ape_console_extras.py\") contract = project.FundMe.deploy(sender=owner) extras = { \"owner\": owner, \"sender\": accounts.test_accounts[1], \"fund_me\": contract,",
"deployments[-1][\"address\"] extras[\"test_contract_address\"] = latest_address # Mimic fixtures owner = accounts.test_accounts[0] logger.info(f\"Deploying {project.FundMe} in",
"fixtures owner = accounts.test_accounts[0] logger.info(f\"Deploying {project.FundMe} in ape_console_extras.py\") contract = project.FundMe.deploy(sender=owner) extras =",
"{ \"owner\": owner, \"sender\": accounts.test_accounts[1], \"fund_me\": contract, \"contract\": contract, **extras, } # Add",
"<gh_stars>0 import json import click from ape.logging import logger def ape_init_extras(accounts, project, config,",
"\"TestContract\" ] latest_address = deployments[-1][\"address\"] extras[\"test_contract_address\"] = latest_address # Mimic fixtures owner =",
"deployments = [ d for d in network_deployments if d[\"contract_type\"] == \"TestContract\" ]",
"= networks.provider.network.name extras = {} try: if ecosystem in config.deployments: ecosystem_deployments = config.deployments[ecosystem]",
"import logger def ape_init_extras(accounts, project, config, networks): ecosystem = networks.provider.network.ecosystem.name network = networks.provider.network.name",
"if network in ecosystem_deployments: network_deployments = ecosystem[network] deployments = [ d for d",
"in ape_console_extras.py\") contract = project.FundMe.deploy(sender=owner) extras = { \"owner\": owner, \"sender\": accounts.test_accounts[1], \"fund_me\":",
"= lambda: click.echo( json.dumps({k: str(v) for k, v in extras.items() if k !=",
"in ecosystem_deployments: network_deployments = ecosystem[network] deployments = [ d for d in network_deployments",
"ape_init_extras(accounts, project, config, networks): ecosystem = networks.provider.network.ecosystem.name network = networks.provider.network.name extras = {}",
"config.deployments[ecosystem] if network in ecosystem_deployments: network_deployments = ecosystem[network] deployments = [ d for",
"d for d in network_deployments if d[\"contract_type\"] == \"TestContract\" ] latest_address = deployments[-1][\"address\"]",
"ape.logging import logger def ape_init_extras(accounts, project, config, networks): ecosystem = networks.provider.network.ecosystem.name network =",
"network_deployments = ecosystem[network] deployments = [ d for d in network_deployments if d[\"contract_type\"]",
"extras[f\"acct{index}\"] = acct index += 1 except Exception as err: logger.error(err) pass extras[\"list_extras\"]",
"= project.FundMe.deploy(sender=owner) extras = { \"owner\": owner, \"sender\": accounts.test_accounts[1], \"fund_me\": contract, \"contract\": contract,",
"extras[\"list_extras\"] = lambda: click.echo( json.dumps({k: str(v) for k, v in extras.items() if k",
"networks.provider.network.name extras = {} try: if ecosystem in config.deployments: ecosystem_deployments = config.deployments[ecosystem] if",
"in config.deployments: ecosystem_deployments = config.deployments[ecosystem] if network in ecosystem_deployments: network_deployments = ecosystem[network] deployments",
"config.deployments: ecosystem_deployments = config.deployments[ecosystem] if network in ecosystem_deployments: network_deployments = ecosystem[network] deployments =",
"owner, \"sender\": accounts.test_accounts[1], \"fund_me\": contract, \"contract\": contract, **extras, } # Add remaining accounts",
"lambda: click.echo( json.dumps({k: str(v) for k, v in extras.items() if k != \"list_extras\"},",
"= [ d for d in network_deployments if d[\"contract_type\"] == \"TestContract\" ] latest_address",
"json import click from ape.logging import logger def ape_init_extras(accounts, project, config, networks): ecosystem",
"try: if ecosystem in config.deployments: ecosystem_deployments = config.deployments[ecosystem] if network in ecosystem_deployments: network_deployments",
"accounts.test_accounts[0] logger.info(f\"Deploying {project.FundMe} in ape_console_extras.py\") contract = project.FundMe.deploy(sender=owner) extras = { \"owner\": owner,",
"\"contract\": contract, **extras, } # Add remaining accounts index = 2 for acct",
"project.FundMe.deploy(sender=owner) extras = { \"owner\": owner, \"sender\": accounts.test_accounts[1], \"fund_me\": contract, \"contract\": contract, **extras,",
"\"fund_me\": contract, \"contract\": contract, **extras, } # Add remaining accounts index = 2",
"network in ecosystem_deployments: network_deployments = ecosystem[network] deployments = [ d for d in",
"project, config, networks): ecosystem = networks.provider.network.ecosystem.name network = networks.provider.network.name extras = {} try:",
"err: logger.error(err) pass extras[\"list_extras\"] = lambda: click.echo( json.dumps({k: str(v) for k, v in",
"for k, v in extras.items() if k != \"list_extras\"}, indent=2) ) return extras",
"acct index += 1 except Exception as err: logger.error(err) pass extras[\"list_extras\"] = lambda:",
"\"sender\": accounts.test_accounts[1], \"fund_me\": contract, \"contract\": contract, **extras, } # Add remaining accounts index",
"[ d for d in network_deployments if d[\"contract_type\"] == \"TestContract\" ] latest_address =",
"# Mimic fixtures owner = accounts.test_accounts[0] logger.info(f\"Deploying {project.FundMe} in ape_console_extras.py\") contract = project.FundMe.deploy(sender=owner)",
"import json import click from ape.logging import logger def ape_init_extras(accounts, project, config, networks):",
"logger.info(f\"Deploying {project.FundMe} in ape_console_extras.py\") contract = project.FundMe.deploy(sender=owner) extras = { \"owner\": owner, \"sender\":",
"config, networks): ecosystem = networks.provider.network.ecosystem.name network = networks.provider.network.name extras = {} try: if",
"contract = project.FundMe.deploy(sender=owner) extras = { \"owner\": owner, \"sender\": accounts.test_accounts[1], \"fund_me\": contract, \"contract\":",
"} # Add remaining accounts index = 2 for acct in accounts.test_accounts[2:]: extras[f\"acct{index}\"]",
"accounts index = 2 for acct in accounts.test_accounts[2:]: extras[f\"acct{index}\"] = acct index +=",
"click from ape.logging import logger def ape_init_extras(accounts, project, config, networks): ecosystem = networks.provider.network.ecosystem.name",
"= networks.provider.network.ecosystem.name network = networks.provider.network.name extras = {} try: if ecosystem in config.deployments:",
"networks): ecosystem = networks.provider.network.ecosystem.name network = networks.provider.network.name extras = {} try: if ecosystem",
"ecosystem in config.deployments: ecosystem_deployments = config.deployments[ecosystem] if network in ecosystem_deployments: network_deployments = ecosystem[network]",
"remaining accounts index = 2 for acct in accounts.test_accounts[2:]: extras[f\"acct{index}\"] = acct index",
"in network_deployments if d[\"contract_type\"] == \"TestContract\" ] latest_address = deployments[-1][\"address\"] extras[\"test_contract_address\"] = latest_address",
"accounts.test_accounts[1], \"fund_me\": contract, \"contract\": contract, **extras, } # Add remaining accounts index =",
"index += 1 except Exception as err: logger.error(err) pass extras[\"list_extras\"] = lambda: click.echo(",
"ecosystem[network] deployments = [ d for d in network_deployments if d[\"contract_type\"] == \"TestContract\"",
"latest_address = deployments[-1][\"address\"] extras[\"test_contract_address\"] = latest_address # Mimic fixtures owner = accounts.test_accounts[0] logger.info(f\"Deploying",
"logger def ape_init_extras(accounts, project, config, networks): ecosystem = networks.provider.network.ecosystem.name network = networks.provider.network.name extras",
"= latest_address # Mimic fixtures owner = accounts.test_accounts[0] logger.info(f\"Deploying {project.FundMe} in ape_console_extras.py\") contract",
"# Add remaining accounts index = 2 for acct in accounts.test_accounts[2:]: extras[f\"acct{index}\"] =",
"**extras, } # Add remaining accounts index = 2 for acct in accounts.test_accounts[2:]:",
"+= 1 except Exception as err: logger.error(err) pass extras[\"list_extras\"] = lambda: click.echo( json.dumps({k:",
"= ecosystem[network] deployments = [ d for d in network_deployments if d[\"contract_type\"] ==",
"ecosystem = networks.provider.network.ecosystem.name network = networks.provider.network.name extras = {} try: if ecosystem in",
"in accounts.test_accounts[2:]: extras[f\"acct{index}\"] = acct index += 1 except Exception as err: logger.error(err)",
"click.echo( json.dumps({k: str(v) for k, v in extras.items() if k != \"list_extras\"}, indent=2)",
"network_deployments if d[\"contract_type\"] == \"TestContract\" ] latest_address = deployments[-1][\"address\"] extras[\"test_contract_address\"] = latest_address #",
"def ape_init_extras(accounts, project, config, networks): ecosystem = networks.provider.network.ecosystem.name network = networks.provider.network.name extras =",
"owner = accounts.test_accounts[0] logger.info(f\"Deploying {project.FundMe} in ape_console_extras.py\") contract = project.FundMe.deploy(sender=owner) extras = {",
"pass extras[\"list_extras\"] = lambda: click.echo( json.dumps({k: str(v) for k, v in extras.items() if",
"acct in accounts.test_accounts[2:]: extras[f\"acct{index}\"] = acct index += 1 except Exception as err:",
"network = networks.provider.network.name extras = {} try: if ecosystem in config.deployments: ecosystem_deployments =",
"contract, \"contract\": contract, **extras, } # Add remaining accounts index = 2 for",
"index = 2 for acct in accounts.test_accounts[2:]: extras[f\"acct{index}\"] = acct index += 1",
"= 2 for acct in accounts.test_accounts[2:]: extras[f\"acct{index}\"] = acct index += 1 except",
"{} try: if ecosystem in config.deployments: ecosystem_deployments = config.deployments[ecosystem] if network in ecosystem_deployments:",
"= accounts.test_accounts[0] logger.info(f\"Deploying {project.FundMe} in ape_console_extras.py\") contract = project.FundMe.deploy(sender=owner) extras = { \"owner\":"
] |
[
"Output noise per voxel (single-dish) for j in range(len(expts)): expt = expts[j] zs,",
"#Tb = baofisher.Tb(z, cosmo) #sigma_HI = np.array([calculate_rms(zz, expt) for zz in z]) #",
"voxel (interferom.) for j in range(len(expts)): expt = expts[j] zs, zc = baofisher.zbins_const_dnu(expt,",
"xi(Rpix) y = k**2. * pk * W_tophat(k, Rpix) xi = scipy.integrate.simps(y, k)",
"MHz sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc, expt) #n_x = load_interferom_file(expt['n(x)']) #x = u / nu",
"'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2' ] # Calculate sigma_HI for a range of redshift",
"expt['dnu'] sigma_T = baofisher.noise_rms_per_voxel(zc, expt) expt2 = copy.copy(expt) expt2['dnu'] = 60. # 60",
"= expt['dnu'] sigma_T = baofisher.noise_rms_per_voxel(zc, expt) expt2 = copy.copy(expt) expt2['dnu'] = 60. #",
"Calculate signal power as a function of frequency. \"\"\" import numpy as np",
"] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2' ] # Output noise per",
"= baofisher.noise_rms_per_voxel(zc, expt) expt2 = copy.copy(expt) expt2['dnu'] = 60. # 60 MHz sigma_60",
"C * (1.+z)**2. / H(z) Tb = baofisher.Tb(z, cosmo) bHI = 1. #baofisher.bias_HI(z,",
"\"\"\" Calculate signal power as a function of frequency. \"\"\" import numpy as",
"baofisher.noise_rms_per_voxel(zc, expt2) # Output data print \"\" print \"-\"*40 print names[j] print \"-\"*40",
"cosmo from units import * import copy nu21 = 1420. # Line frequency",
"(interferom.) for j in range(len(expts)): expt = expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo,",
"/ (60 MHz)\" % dnu print \"-\"*40 for i in range(zc.size): #sigma_HI =",
"print \"\" print \"-\"*40 print names[j] print \"-\"*40 print \" zc / dz",
"/ (1e6 * expt['nu_line']) / expt['Ddish'] # Beam FWHM rnu = C *",
"Tsky = 60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground sky signal (mK) Tsys = expt['Tinst']",
"[uK]\" print \" -- / -- / (%2.2f MHz) / (60 MHz)\" %",
"per voxel (interferom.) for j in range(len(expts)): expt = expts[j] zs, zc =",
"e.SKA1MIDfull1, e.SKA1MIDfull2, e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1',",
"= 3e8 * (1. + z) / (1e6 * expt['nu_line']) / expt['Ddish'] #",
"sigma_HI*1e3, lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z, sigma_T*1e3, lw=1.4, label=\"$\\sigma_T(z)$\") P.plot([0.5, 1., 1.5, 2.], [155.8, 210.9,",
"%4.4f %4.4f %4.4f\" % (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i]) expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2,",
"signal (mK) Tsys = expt['Tinst'] + Tsky #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f",
"calculate_rms(zc[i], expt) print \"%2.2f %4.4f %8.8f %8.8f %4.4f %4.4f\" % \\ (zc[i], zs[i+1]-zs[i],",
"3., 100) #Tb = baofisher.Tb(z, cosmo) #sigma_HI = np.array([calculate_rms(zz, expt) for zz in",
"'SKA1MIDfull1', 'SKA1MIDfull2' ] # Output noise per voxel (interferom.) for j in range(len(expts)):",
"Output noise per voxel (interferom.) for j in range(len(expts)): expt = expts[j] zs,",
"* import copy nu21 = 1420. # Line frequency at z=0 # Pre-calculate",
"[uK] / sigma_T [uK]\" print \" -- / -- / (%2.2f MHz) /",
"n_x(x) / nu**2. # n(x) = n(u) * nu^2 # Output data print",
"sky signal (mK) Tsys = expt['Tinst'] + Tsky #sigma_HI = calculate_rms(zc[i], expt) print",
"# Foreground sky signal (mK) Tsys = expt['Tinst'] + Tsky #sigma_HI = calculate_rms(zc[i],",
"P.subplot(111) P.plot(z, Tb*1e3, lw=1.4, label=\"$T_b(z)$\") P.plot(z, sigma_HI*1e3, lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z, sigma_T*1e3, lw=1.4, label=\"$\\sigma_T(z)$\")",
"/ (60 MHz)\" % dnu print \"-\"*40 for i in range(zc.size): # Calculate",
"= np.array([calculate_rms(zz, expt) for zz in z]) # Output noise per voxel (single-dish)",
"* expt['dnu'] / nu21 Rpix = Vpix**(1./3.) # Integrate P(k) to get correlation",
"\"%2.2f %4.4f %8.8f %8.8f %4.4f %4.4f\" % \\ (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i], l,",
"e.SKA1SURfull1, e.SKA1SURfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2', 'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1',",
"Tsys = expt['Tinst'] + Tsky #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %8.8f",
"for a range of redshift #z = np.linspace(1e-2, 3., 100) #Tb = baofisher.Tb(z,",
"'SKA1MIDfull2', 'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2' ] # Calculate sigma_HI for a range of",
"SKA RFC P.plot([0.5, 1., 1.5, 2.], [40.1, 28.0, 20.9, 16.4], 'go') # rms",
"expt2) # Output data print \"\" print \"-\"*40 print names[j] print \"-\"*40 print",
"= 3e8 * (1. + zc[i]) / 1420.e6 Ddish = expt['Ddish'] Tsky =",
"H(z) Tb = baofisher.Tb(z, cosmo) bHI = 1. #baofisher.bias_HI(z, cosmo) # Calculate pixel",
"a range of redshift #z = np.linspace(1e-2, 3., 100) #Tb = baofisher.Tb(z, cosmo)",
"import pylab as P import scipy.integrate import baofisher import experiments from experiments import",
"/ sqrt[n(u)] * sigma_T [uK] / sqrt[n(u)] * sigma_T [uK] / lambda [m]",
"'SKA1SURfull1', 'SKA1SURfull2' ] # Calculate sigma_HI for a range of redshift #z =",
"print \"%2.2f %4.4f %4.4f %4.4f\" % (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i]) expts = [",
"% (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i]) expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2 ]",
"2.], [155.8, 210.9, 245.6, 260.8], 'bo') # mean Tb, from SKA RFC P.plot([0.5,",
"Rohlfs & Wilson (5th Ed.) l = 3e8 * (1. + zc[i]) /",
"e.SKA1MIDfull1, e.SKA1MIDfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2' ] # Output",
"e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2, e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2',",
"# Calculate quantities from Eq. 9.38 of Rohlfs & Wilson (5th Ed.) l",
"l = 3e8 * (1. + zc[i]) / 1420.e6 Ddish = expt['Ddish'] Tsky",
"nu**2. # n(x) = n(u) * nu^2 # Output data print \"\" print",
"W_tophat(k, r): return 3. * ( np.sin(k * r) - k * r",
"(5th Ed.) l = 3e8 * (1. + zc[i]) / 1420.e6 Ddish =",
"# Output data print \"\" print \"-\"*40 print names[j], \"(INTERFEROMETER)\" print \"-\"*40 print",
"zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] expt['Sarea'] = 100.*(D2RAD)**2. sigma_T",
"np.sqrt(xi) # in mK # Choose experiment e = experiments expts = [",
"1420.e6 Ddish = expt['Ddish'] Tsky = 60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground sky signal",
"* np.cos(k * r) ) / ((k * r)**3.) def calculate_rms(z, expt): \"\"\"",
"scipy.integrate import baofisher import experiments from experiments import cosmo from units import *",
"/ sigma_T [uK]\" print \" -- / -- / (%2.2f MHz) / (60",
"= baofisher.noise_rms_per_voxel_interferom(zc, expt) expt['dnu'] = 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc, expt)",
"% dnu print \"-\"*40 for i in range(zc.size): # Calculate quantities from Eq.",
"(60 MHz)\" % dnu print \"-\"*40 for i in range(zc.size): # Calculate quantities",
"given redshift. \"\"\" theta_b = 3e8 * (1. + z) / (1e6 *",
"dz / sqrt[n(u)] * sigma_T [uK] / sqrt[n(u)] * sigma_T [uK] / lambda",
"3. * ( np.sin(k * r) - k * r * np.cos(k *",
"'SKA1SURfull2' ] # Calculate sigma_HI for a range of redshift #z = np.linspace(1e-2,",
"scipy.integrate.simps(y, k) / (2. * np.pi**2.) # Return rms HI fluctuation return Tb",
"names[j], \"(INTERFEROMETER)\" print \"-\"*40 print \" zc / dz / sqrt[n(u)] * sigma_T",
"dnu print \"-\"*40 for i in range(zc.size): # Calculate quantities from Eq. 9.38",
"= (r(z) * theta_b)**2. * rnu * expt['dnu'] / nu21 Rpix = Vpix**(1./3.)",
"bHI * np.sqrt(xi) # in mK # Choose experiment e = experiments expts",
"= n_x(x) / nu**2. # n(x) = n(u) * nu^2 # Output data",
"given redshift Vpix = (r(z) * theta_b)**2. * rnu * expt['dnu'] / nu21",
"% \\ (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i], l, Tsys/1e3) exit() # Plot results P.subplot(111)",
"Pre-calculate cosmological quantities k, pk = np.genfromtxt(\"cache_pk.dat\")[:-1].T H, r, D, f = baofisher.background_evolution_splines(cosmo)",
"# Integrate P(k) to get correlation fn. averaged in a ball, xi(Rpix) y",
"Tsys [K]\" print \" -- / -- / (%2.2f MHz) / (60 MHz)\"",
"as a function of frequency. \"\"\" import numpy as np import pylab as",
"rnu * expt['dnu'] / nu21 Rpix = Vpix**(1./3.) # Integrate P(k) to get",
"baofisher.noise_rms_per_voxel_interferom(zc, expt) #n_x = load_interferom_file(expt['n(x)']) #x = u / nu # x =",
"lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z, sigma_T*1e3, lw=1.4, label=\"$\\sigma_T(z)$\") P.plot([0.5, 1., 1.5, 2.], [155.8, 210.9, 245.6,",
"#n_u = n_x(x) / nu**2. # n(x) = n(u) * nu^2 # Output",
"np.sin(k * r) - k * r * np.cos(k * r) ) /",
"experiments from experiments import cosmo from units import * import copy nu21 =",
"* W_tophat(k, Rpix) xi = scipy.integrate.simps(y, k) / (2. * np.pi**2.) # Return",
"baofisher.noise_rms_per_voxel(zc, expt) expt2 = copy.copy(expt) expt2['dnu'] = 60. # 60 MHz sigma_60 =",
"/ dz / sqrt[n(u)] * sigma_T [uK] / sqrt[n(u)] * sigma_T [uK] /",
"names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2', 'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2' ] #",
"x = u / (freq [MHz]) #n_u = n_x(x) / nu**2. # n(x)",
"* (1.+z)**2. / H(z) Tb = baofisher.Tb(z, cosmo) bHI = 1. #baofisher.bias_HI(z, cosmo)",
"k**2. * pk * W_tophat(k, Rpix) xi = scipy.integrate.simps(y, k) / (2. *",
"210.9, 245.6, 260.8], 'bo') # mean Tb, from SKA RFC P.plot([0.5, 1., 1.5,",
"Vpix = (r(z) * theta_b)**2. * rnu * expt['dnu'] / nu21 Rpix =",
"Tb * D(z) * bHI * np.sqrt(xi) # in mK # Choose experiment",
"# Calculate pixel volume at given redshift Vpix = (r(z) * theta_b)**2. *",
"at given redshift Vpix = (r(z) * theta_b)**2. * rnu * expt['dnu'] /",
"zz in z]) # Output noise per voxel (single-dish) for j in range(len(expts)):",
"\"\"\" theta_b = 3e8 * (1. + z) / (1e6 * expt['nu_line']) /",
"expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2',",
"* np.pi**2.) # Return rms HI fluctuation return Tb * D(z) * bHI",
"1.5, 2.], [155.8, 210.9, 245.6, 260.8], 'bo') # mean Tb, from SKA RFC",
"1420. # Line frequency at z=0 # Pre-calculate cosmological quantities k, pk =",
"r * np.cos(k * r) ) / ((k * r)**3.) def calculate_rms(z, expt):",
"'bo') # mean Tb, from SKA RFC P.plot([0.5, 1., 1.5, 2.], [40.1, 28.0,",
"Foreground sky signal (mK) Tsys = expt['Tinst'] + Tsky #sigma_HI = calculate_rms(zc[i], expt)",
"= u / nu # x = u / (freq [MHz]) #n_u =",
"[ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2', 'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2' ] # Calculate sigma_HI",
"label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z, sigma_T*1e3, lw=1.4, label=\"$\\sigma_T(z)$\") P.plot([0.5, 1., 1.5, 2.], [155.8, 210.9, 245.6, 260.8],",
"'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2' ] # Output noise per voxel (interferom.) for j in",
"%8.8f %8.8f %4.4f %4.4f\" % \\ (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i], l, Tsys/1e3) exit()",
"redshift #z = np.linspace(1e-2, 3., 100) #Tb = baofisher.Tb(z, cosmo) #sigma_HI = np.array([calculate_rms(zz,",
"1. #baofisher.bias_HI(z, cosmo) # Calculate pixel volume at given redshift Vpix = (r(z)",
"Line frequency at z=0 # Pre-calculate cosmological quantities k, pk = np.genfromtxt(\"cache_pk.dat\")[:-1].T H,",
"i in range(zc.size): #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %4.4f %4.4f\" %",
"cosmo) # Calculate pixel volume at given redshift Vpix = (r(z) * theta_b)**2.",
"3e8 * (1. + z) / (1e6 * expt['nu_line']) / expt['Ddish'] # Beam",
"* rnu * expt['dnu'] / nu21 Rpix = Vpix**(1./3.) # Integrate P(k) to",
"u / (freq [MHz]) #n_u = n_x(x) / nu**2. # n(x) = n(u)",
"\" zc / dz / sqrt[n(u)] * sigma_T [uK] / sqrt[n(u)] * sigma_T",
"cosmo, dnu=60.) dnu = expt['dnu'] sigma_T = baofisher.noise_rms_per_voxel(zc, expt) expt2 = copy.copy(expt) expt2['dnu']",
"(1e6 * expt['nu_line']) / expt['Ddish'] # Beam FWHM rnu = C * (1.+z)**2.",
"MHz)\" % dnu print \"-\"*40 for i in range(zc.size): # Calculate quantities from",
"np.pi**2.) # Return rms HI fluctuation return Tb * D(z) * bHI *",
"k * r * np.cos(k * r) ) / ((k * r)**3.) def",
"(mK) Tsys = expt['Tinst'] + Tsky #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f",
"# 60 MHz sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc, expt) #n_x = load_interferom_file(expt['n(x)']) #x = u",
"print \" zc / dz / sqrt[n(u)] * sigma_T [uK] / sqrt[n(u)] *",
"nu21 = 1420. # Line frequency at z=0 # Pre-calculate cosmological quantities k,",
"Plot results P.subplot(111) P.plot(z, Tb*1e3, lw=1.4, label=\"$T_b(z)$\") P.plot(z, sigma_HI*1e3, lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z, sigma_T*1e3,",
"fluctuation return Tb * D(z) * bHI * np.sqrt(xi) # in mK #",
"P.plot(z, sigma_HI*1e3, lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z, sigma_T*1e3, lw=1.4, label=\"$\\sigma_T(z)$\") P.plot([0.5, 1., 1.5, 2.], [155.8,",
"zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] sigma_T = baofisher.noise_rms_per_voxel(zc, expt) expt2",
"zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] expt['Sarea'] = 100.*(D2RAD)**2. sigma_T =",
"print \"-\"*40 print \" zc / dz / sigma_T [uK] / sigma_T [uK]\"",
"Rpix = Vpix**(1./3.) # Integrate P(k) to get correlation fn. averaged in a",
"= 1420. # Line frequency at z=0 # Pre-calculate cosmological quantities k, pk",
"from Eq. 9.38 of Rohlfs & Wilson (5th Ed.) l = 3e8 *",
"mK # Choose experiment e = experiments expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1,",
"experiments expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2, e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2 ]",
"= n(u) * nu^2 # Output data print \"\" print \"-\"*40 print names[j],",
"import * import copy nu21 = 1420. # Line frequency at z=0 #",
"as P import scipy.integrate import baofisher import experiments from experiments import cosmo from",
"Tb, from SKA RFC P.plot([0.5, 1., 1.5, 2.], [40.1, 28.0, 20.9, 16.4], 'go')",
"np.cos(k * r) ) / ((k * r)**3.) def calculate_rms(z, expt): \"\"\" Calculate",
"print \"-\"*40 for i in range(zc.size): # Calculate quantities from Eq. 9.38 of",
"\\ (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i], l, Tsys/1e3) exit() # Plot results P.subplot(111) P.plot(z,",
"lw=1.4, label=\"$\\sigma_T(z)$\") P.plot([0.5, 1., 1.5, 2.], [155.8, 210.9, 245.6, 260.8], 'bo') # mean",
"#sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %4.4f %4.4f\" % (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i],",
"= np.linspace(1e-2, 3., 100) #Tb = baofisher.Tb(z, cosmo) #sigma_HI = np.array([calculate_rms(zz, expt) for",
"j in range(len(expts)): expt = expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu",
"(single-dish) for j in range(len(expts)): expt = expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo,",
"import experiments from experiments import cosmo from units import * import copy nu21",
"mean Tb, from SKA RFC P.plot([0.5, 1., 1.5, 2.], [40.1, 28.0, 20.9, 16.4],",
"Output data print \"\" print \"-\"*40 print names[j] print \"-\"*40 print \" zc",
"# Output noise per voxel (single-dish) for j in range(len(expts)): expt = expts[j]",
"60 MHz sigma_60 = baofisher.noise_rms_per_voxel(zc, expt2) # Output data print \"\" print \"-\"*40",
"/ sqrt[n(u)] * sigma_T [uK] / lambda [m] / Tsys [K]\" print \"",
"Return rms HI fluctuation return Tb * D(z) * bHI * np.sqrt(xi) #",
"\"-\"*40 print \" zc / dz / sigma_T [uK] / sigma_T [uK]\" print",
"print \"-\"*40 print names[j], \"(INTERFEROMETER)\" print \"-\"*40 print \" zc / dz /",
"calculate_rms(zc[i], expt) print \"%2.2f %4.4f %4.4f %4.4f\" % (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i]) expts",
"in range(zc.size): #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %4.4f %4.4f\" % (zc[i],",
"experiment e = experiments expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2, e.SKA1SURbase1, e.SKA1SURbase2,",
"#n_x = load_interferom_file(expt['n(x)']) #x = u / nu # x = u /",
"\"\"\" import numpy as np import pylab as P import scipy.integrate import baofisher",
"from units import * import copy nu21 = 1420. # Line frequency at",
"= [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2' ] # Output noise per voxel (interferom.)",
"/ sigma_T [uK] / sigma_T [uK]\" print \" -- / -- / (%2.2f",
"r)**3.) def calculate_rms(z, expt): \"\"\" Calculate RMS of HI signal at a given",
"# Beam FWHM rnu = C * (1.+z)**2. / H(z) Tb = baofisher.Tb(z,",
"% dnu print \"-\"*40 for i in range(zc.size): #sigma_HI = calculate_rms(zc[i], expt) print",
"28.0, 20.9, 16.4], 'go') # rms Tb, from SKA RFC P.xlabel(\"z\") P.ylabel(\"uK\") P.legend(loc='upper",
"* np.sqrt(xi) # in mK # Choose experiment e = experiments expts =",
"of Rohlfs & Wilson (5th Ed.) l = 3e8 * (1. + zc[i])",
"def W_tophat(k, r): return 3. * ( np.sin(k * r) - k *",
"copy.copy(expt) expt2['dnu'] = 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel(zc, expt2) # Output",
"= expt['Ddish'] Tsky = 60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground sky signal (mK) Tsys",
"/ nu21 Rpix = Vpix**(1./3.) # Integrate P(k) to get correlation fn. averaged",
"# Plot results P.subplot(111) P.plot(z, Tb*1e3, lw=1.4, label=\"$T_b(z)$\") P.plot(z, sigma_HI*1e3, lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z,",
"as np import pylab as P import scipy.integrate import baofisher import experiments from",
"to get correlation fn. averaged in a ball, xi(Rpix) y = k**2. *",
"Output data print \"\" print \"-\"*40 print names[j], \"(INTERFEROMETER)\" print \"-\"*40 print \"",
"return 3. * ( np.sin(k * r) - k * r * np.cos(k",
"(60 MHz)\" % dnu print \"-\"*40 for i in range(zc.size): #sigma_HI = calculate_rms(zc[i],",
"dnu = expt['dnu'] expt['Sarea'] = 100.*(D2RAD)**2. sigma_T = baofisher.noise_rms_per_voxel_interferom(zc, expt) expt['dnu'] = 60.",
"Eq. 9.38 of Rohlfs & Wilson (5th Ed.) l = 3e8 * (1.",
"= expt['Tinst'] + Tsky #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %8.8f %8.8f",
"range(len(expts)): expt = expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu']",
"expt): \"\"\" Calculate RMS of HI signal at a given redshift. \"\"\" theta_b",
"r) ) / ((k * r)**3.) def calculate_rms(z, expt): \"\"\" Calculate RMS of",
"(zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i]) expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2 ] names",
"theta_b = 3e8 * (1. + z) / (1e6 * expt['nu_line']) / expt['Ddish']",
"+ z) / (1e6 * expt['nu_line']) / expt['Ddish'] # Beam FWHM rnu =",
"%4.4f\" % \\ (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i], l, Tsys/1e3) exit() # Plot results",
") / ((k * r)**3.) def calculate_rms(z, expt): \"\"\" Calculate RMS of HI",
"HI signal at a given redshift. \"\"\" theta_b = 3e8 * (1. +",
"pk * W_tophat(k, Rpix) xi = scipy.integrate.simps(y, k) / (2. * np.pi**2.) #",
"1e3*sigma_60[i], l, Tsys/1e3) exit() # Plot results P.subplot(111) P.plot(z, Tb*1e3, lw=1.4, label=\"$T_b(z)$\") P.plot(z,",
"P import scipy.integrate import baofisher import experiments from experiments import cosmo from units",
"e.SKA1MIDfull2, e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2',",
"P.plot([0.5, 1., 1.5, 2.], [155.8, 210.9, 245.6, 260.8], 'bo') # mean Tb, from",
"results P.subplot(111) P.plot(z, Tb*1e3, lw=1.4, label=\"$T_b(z)$\") P.plot(z, sigma_HI*1e3, lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z, sigma_T*1e3, lw=1.4,",
"expt['Tinst'] + Tsky #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %8.8f %8.8f %4.4f",
"# Output data print \"\" print \"-\"*40 print names[j] print \"-\"*40 print \"",
"1., 1.5, 2.], [155.8, 210.9, 245.6, 260.8], 'bo') # mean Tb, from SKA",
"xi = scipy.integrate.simps(y, k) / (2. * np.pi**2.) # Return rms HI fluctuation",
"e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2', 'SKA1SURbase1',",
"MHz)\" % dnu print \"-\"*40 for i in range(zc.size): #sigma_HI = calculate_rms(zc[i], expt)",
"baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] sigma_T = baofisher.noise_rms_per_voxel(zc, expt) expt2 = copy.copy(expt)",
"100.*(D2RAD)**2. sigma_T = baofisher.noise_rms_per_voxel_interferom(zc, expt) expt['dnu'] = 60. # 60 MHz sigma_60 =",
"= u / (freq [MHz]) #n_u = n_x(x) / nu**2. # n(x) =",
"for i in range(zc.size): # Calculate quantities from Eq. 9.38 of Rohlfs &",
"\"-\"*40 print names[j], \"(INTERFEROMETER)\" print \"-\"*40 print \" zc / dz / sqrt[n(u)]",
"load_interferom_file(expt['n(x)']) #x = u / nu # x = u / (freq [MHz])",
"* expt['nu_line']) / expt['Ddish'] # Beam FWHM rnu = C * (1.+z)**2. /",
"60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel(zc, expt2) # Output data print \"\"",
"zc / dz / sqrt[n(u)] * sigma_T [uK] / sqrt[n(u)] * sigma_T [uK]",
"= baofisher.Tb(z, cosmo) #sigma_HI = np.array([calculate_rms(zz, expt) for zz in z]) # Output",
"# Calculate sigma_HI for a range of redshift #z = np.linspace(1e-2, 3., 100)",
"r, D, f = baofisher.background_evolution_splines(cosmo) def W_tophat(k, r): return 3. * ( np.sin(k",
"from experiments import cosmo from units import * import copy nu21 = 1420.",
"(1.+z)**2. / H(z) Tb = baofisher.Tb(z, cosmo) bHI = 1. #baofisher.bias_HI(z, cosmo) #",
"expt2 = copy.copy(expt) expt2['dnu'] = 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel(zc, expt2)",
"-- / (%2.2f MHz) / (60 MHz)\" % dnu print \"-\"*40 for i",
"MHz) / (60 MHz)\" % dnu print \"-\"*40 for i in range(zc.size): #sigma_HI",
"/ nu # x = u / (freq [MHz]) #n_u = n_x(x) /",
"in mK # Choose experiment e = experiments expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2,",
"expt['Ddish'] # Beam FWHM rnu = C * (1.+z)**2. / H(z) Tb =",
"60 MHz sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc, expt) #n_x = load_interferom_file(expt['n(x)']) #x = u /",
"3e8 * (1. + zc[i]) / 1420.e6 Ddish = expt['Ddish'] Tsky = 60e3",
"# x = u / (freq [MHz]) #n_u = n_x(x) / nu**2. #",
"= calculate_rms(zc[i], expt) print \"%2.2f %4.4f %8.8f %8.8f %4.4f %4.4f\" % \\ (zc[i],",
"range of redshift #z = np.linspace(1e-2, 3., 100) #Tb = baofisher.Tb(z, cosmo) #sigma_HI",
"D, f = baofisher.background_evolution_splines(cosmo) def W_tophat(k, r): return 3. * ( np.sin(k *",
"100) #Tb = baofisher.Tb(z, cosmo) #sigma_HI = np.array([calculate_rms(zz, expt) for zz in z])",
"sigma_T [uK]\" print \" -- / -- / (%2.2f MHz) / (60 MHz)\"",
"2.], [40.1, 28.0, 20.9, 16.4], 'go') # rms Tb, from SKA RFC P.xlabel(\"z\")",
"-- / -- / (%2.2f MHz) / (60 MHz)\" % dnu print \"-\"*40",
"sigma_T = baofisher.noise_rms_per_voxel_interferom(zc, expt) expt['dnu'] = 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc,",
"(%2.2f MHz) / (60 MHz)\" % dnu print \"-\"*40 for i in range(zc.size):",
"'SKA1MIDfull1', 'SKA1MIDfull2', 'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2' ] # Calculate sigma_HI for a range",
"e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2' ]",
"pk = np.genfromtxt(\"cache_pk.dat\")[:-1].T H, r, D, f = baofisher.background_evolution_splines(cosmo) def W_tophat(k, r): return",
"redshift Vpix = (r(z) * theta_b)**2. * rnu * expt['dnu'] / nu21 Rpix",
"[ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2' ] # Output noise per voxel (interferom.) for",
"volume at given redshift Vpix = (r(z) * theta_b)**2. * rnu * expt['dnu']",
"correlation fn. averaged in a ball, xi(Rpix) y = k**2. * pk *",
"rnu = C * (1.+z)**2. / H(z) Tb = baofisher.Tb(z, cosmo) bHI =",
"expt) expt2 = copy.copy(expt) expt2['dnu'] = 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel(zc,",
"/ H(z) Tb = baofisher.Tb(z, cosmo) bHI = 1. #baofisher.bias_HI(z, cosmo) # Calculate",
"Wilson (5th Ed.) l = 3e8 * (1. + zc[i]) / 1420.e6 Ddish",
"= baofisher.noise_rms_per_voxel(zc, expt2) # Output data print \"\" print \"-\"*40 print names[j] print",
"\"-\"*40 print \" zc / dz / sqrt[n(u)] * sigma_T [uK] / sqrt[n(u)]",
"names[j] print \"-\"*40 print \" zc / dz / sigma_T [uK] / sigma_T",
"return Tb * D(z) * bHI * np.sqrt(xi) # in mK # Choose",
"print \"%2.2f %4.4f %8.8f %8.8f %4.4f %4.4f\" % \\ (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i],",
"# n(x) = n(u) * nu^2 # Output data print \"\" print \"-\"*40",
"[m] / Tsys [K]\" print \" -- / -- / (%2.2f MHz) /",
"per voxel (single-dish) for j in range(len(expts)): expt = expts[j] zs, zc =",
"zc / dz / sigma_T [uK] / sigma_T [uK]\" print \" -- /",
"Calculate RMS of HI signal at a given redshift. \"\"\" theta_b = 3e8",
"z=0 # Pre-calculate cosmological quantities k, pk = np.genfromtxt(\"cache_pk.dat\")[:-1].T H, r, D, f",
"baofisher.noise_rms_per_voxel_interferom(zc, expt) expt['dnu'] = 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc, expt) #n_x",
"import numpy as np import pylab as P import scipy.integrate import baofisher import",
"e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2', 'SKA1SURbase1', 'SKA1SURbase2',",
"np import pylab as P import scipy.integrate import baofisher import experiments from experiments",
"#!/usr/bin/python \"\"\" Calculate signal power as a function of frequency. \"\"\" import numpy",
"averaged in a ball, xi(Rpix) y = k**2. * pk * W_tophat(k, Rpix)",
"* pk * W_tophat(k, Rpix) xi = scipy.integrate.simps(y, k) / (2. * np.pi**2.)",
"* r) ) / ((k * r)**3.) def calculate_rms(z, expt): \"\"\" Calculate RMS",
"* r)**3.) def calculate_rms(z, expt): \"\"\" Calculate RMS of HI signal at a",
"[155.8, 210.9, 245.6, 260.8], 'bo') # mean Tb, from SKA RFC P.plot([0.5, 1.,",
"'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2', 'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2' ] # Calculate sigma_HI for a",
"[ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2'",
"expt) print \"%2.2f %4.4f %8.8f %8.8f %4.4f %4.4f\" % \\ (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i],",
"P.plot([0.5, 1., 1.5, 2.], [40.1, 28.0, 20.9, 16.4], 'go') # rms Tb, from",
"\" -- / -- / (%2.2f MHz) / (60 MHz)\" % dnu print",
"for i in range(zc.size): #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %4.4f %4.4f\"",
"(zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i], l, Tsys/1e3) exit() # Plot results P.subplot(111) P.plot(z, Tb*1e3,",
"dnu=60.) dnu = expt['dnu'] sigma_T = baofisher.noise_rms_per_voxel(zc, expt) expt2 = copy.copy(expt) expt2['dnu'] =",
"(r(z) * theta_b)**2. * rnu * expt['dnu'] / nu21 Rpix = Vpix**(1./3.) #",
"Tsky #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %8.8f %8.8f %4.4f %4.4f\" %",
"numpy as np import pylab as P import scipy.integrate import baofisher import experiments",
"Vpix**(1./3.) # Integrate P(k) to get correlation fn. averaged in a ball, xi(Rpix)",
"sigma_HI for a range of redshift #z = np.linspace(1e-2, 3., 100) #Tb =",
"dnu print \"-\"*40 for i in range(zc.size): #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f",
"of HI signal at a given redshift. \"\"\" theta_b = 3e8 * (1.",
"* ( np.sin(k * r) - k * r * np.cos(k * r)",
"z]) # Output noise per voxel (single-dish) for j in range(len(expts)): expt =",
"1e3*sigma_T[i], 1e3*sigma_60[i], l, Tsys/1e3) exit() # Plot results P.subplot(111) P.plot(z, Tb*1e3, lw=1.4, label=\"$T_b(z)$\")",
"/ lambda [m] / Tsys [K]\" print \" -- / -- / (%2.2f",
"%8.8f %4.4f %4.4f\" % \\ (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i], l, Tsys/1e3) exit() #",
"from SKA RFC P.plot([0.5, 1., 1.5, 2.], [40.1, 28.0, 20.9, 16.4], 'go') #",
"P.plot(z, sigma_T*1e3, lw=1.4, label=\"$\\sigma_T(z)$\") P.plot([0.5, 1., 1.5, 2.], [155.8, 210.9, 245.6, 260.8], 'bo')",
"= expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] expt['Sarea'] =",
"k) / (2. * np.pi**2.) # Return rms HI fluctuation return Tb *",
"range(zc.size): #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %4.4f %4.4f\" % (zc[i], zs[i+1]-zs[i],",
"expt['Sarea'] = 100.*(D2RAD)**2. sigma_T = baofisher.noise_rms_per_voxel_interferom(zc, expt) expt['dnu'] = 60. # 60 MHz",
"print \"-\"*40 print \" zc / dz / sqrt[n(u)] * sigma_T [uK] /",
"- k * r * np.cos(k * r) ) / ((k * r)**3.)",
"\"-\"*40 print names[j] print \"-\"*40 print \" zc / dz / sigma_T [uK]",
"/ (%2.2f MHz) / (60 MHz)\" % dnu print \"-\"*40 for i in",
"zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i]) expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2 ] names =",
"= [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1',",
"# Line frequency at z=0 # Pre-calculate cosmological quantities k, pk = np.genfromtxt(\"cache_pk.dat\")[:-1].T",
"HI fluctuation return Tb * D(z) * bHI * np.sqrt(xi) # in mK",
"signal power as a function of frequency. \"\"\" import numpy as np import",
"RMS of HI signal at a given redshift. \"\"\" theta_b = 3e8 *",
"/ expt['Ddish'] # Beam FWHM rnu = C * (1.+z)**2. / H(z) Tb",
"n(u) * nu^2 # Output data print \"\" print \"-\"*40 print names[j], \"(INTERFEROMETER)\"",
"sigma_T [uK] / sqrt[n(u)] * sigma_T [uK] / lambda [m] / Tsys [K]\"",
"Rpix) xi = scipy.integrate.simps(y, k) / (2. * np.pi**2.) # Return rms HI",
"zc[i]) / 1420.e6 Ddish = expt['Ddish'] Tsky = 60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground",
"experiments import cosmo from units import * import copy nu21 = 1420. #",
"= C * (1.+z)**2. / H(z) Tb = baofisher.Tb(z, cosmo) bHI = 1.",
"data print \"\" print \"-\"*40 print names[j] print \"-\"*40 print \" zc /",
"[uK] / lambda [m] / Tsys [K]\" print \" -- / -- /",
"[uK] / sqrt[n(u)] * sigma_T [uK] / lambda [m] / Tsys [K]\" print",
"D(z) * bHI * np.sqrt(xi) # in mK # Choose experiment e =",
"P(k) to get correlation fn. averaged in a ball, xi(Rpix) y = k**2.",
"n(x) = n(u) * nu^2 # Output data print \"\" print \"-\"*40 print",
"] # Output noise per voxel (interferom.) for j in range(len(expts)): expt =",
"= load_interferom_file(expt['n(x)']) #x = u / nu # x = u / (freq",
"#x = u / nu # x = u / (freq [MHz]) #n_u",
"baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] expt['Sarea'] = 100.*(D2RAD)**2. sigma_T = baofisher.noise_rms_per_voxel_interferom(zc, expt)",
"for zz in z]) # Output noise per voxel (single-dish) for j in",
"e = experiments expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2, e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1,",
"quantities from Eq. 9.38 of Rohlfs & Wilson (5th Ed.) l = 3e8",
"sigma_T*1e3, lw=1.4, label=\"$\\sigma_T(z)$\") P.plot([0.5, 1., 1.5, 2.], [155.8, 210.9, 245.6, 260.8], 'bo') #",
"ball, xi(Rpix) y = k**2. * pk * W_tophat(k, Rpix) xi = scipy.integrate.simps(y,",
"= baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] sigma_T = baofisher.noise_rms_per_voxel(zc, expt) expt2 =",
"/ 1420.e6 Ddish = expt['Ddish'] Tsky = 60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground sky",
"= 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel(zc, expt2) # Output data print",
"import scipy.integrate import baofisher import experiments from experiments import cosmo from units import",
"'SKA1MIDfull2' ] # Output noise per voxel (interferom.) for j in range(len(expts)): expt",
"\"\" print \"-\"*40 print names[j] print \"-\"*40 print \" zc / dz /",
"Ed.) l = 3e8 * (1. + zc[i]) / 1420.e6 Ddish = expt['Ddish']",
"pixel volume at given redshift Vpix = (r(z) * theta_b)**2. * rnu *",
"16.4], 'go') # rms Tb, from SKA RFC P.xlabel(\"z\") P.ylabel(\"uK\") P.legend(loc='upper left') P.show()",
"fn. averaged in a ball, xi(Rpix) y = k**2. * pk * W_tophat(k,",
"zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i], l, Tsys/1e3) exit() # Plot results P.subplot(111) P.plot(z, Tb*1e3, lw=1.4,",
"function of frequency. \"\"\" import numpy as np import pylab as P import",
"#baofisher.bias_HI(z, cosmo) # Calculate pixel volume at given redshift Vpix = (r(z) *",
"1e3*sigma_60[i]) expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2 ] names = [ 'SKA1MIDbase1',",
"label=\"$\\sigma_T(z)$\") P.plot([0.5, 1., 1.5, 2.], [155.8, 210.9, 245.6, 260.8], 'bo') # mean Tb,",
"redshift. \"\"\" theta_b = 3e8 * (1. + z) / (1e6 * expt['nu_line'])",
"# mean Tb, from SKA RFC P.plot([0.5, 1., 1.5, 2.], [40.1, 28.0, 20.9,",
"a ball, xi(Rpix) y = k**2. * pk * W_tophat(k, Rpix) xi =",
"e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2, e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2 ] names = [ 'SKA1MIDbase1',",
"at a given redshift. \"\"\" theta_b = 3e8 * (1. + z) /",
"\" zc / dz / sigma_T [uK] / sigma_T [uK]\" print \" --",
"RFC P.plot([0.5, 1., 1.5, 2.], [40.1, 28.0, 20.9, 16.4], 'go') # rms Tb,",
"/ (freq [MHz]) #n_u = n_x(x) / nu**2. # n(x) = n(u) *",
"#z = np.linspace(1e-2, 3., 100) #Tb = baofisher.Tb(z, cosmo) #sigma_HI = np.array([calculate_rms(zz, expt)",
"%4.4f %4.4f\" % \\ (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i], l, Tsys/1e3) exit() # Plot",
"* bHI * np.sqrt(xi) # in mK # Choose experiment e = experiments",
"/ dz / sigma_T [uK] / sigma_T [uK]\" print \" -- / --",
"i in range(zc.size): # Calculate quantities from Eq. 9.38 of Rohlfs & Wilson",
"# Return rms HI fluctuation return Tb * D(z) * bHI * np.sqrt(xi)",
"cosmo) #sigma_HI = np.array([calculate_rms(zz, expt) for zz in z]) # Output noise per",
"* sigma_T [uK] / sqrt[n(u)] * sigma_T [uK] / lambda [m] / Tsys",
"calculate_rms(z, expt): \"\"\" Calculate RMS of HI signal at a given redshift. \"\"\"",
"print names[j] print \"-\"*40 print \" zc / dz / sigma_T [uK] /",
"a function of frequency. \"\"\" import numpy as np import pylab as P",
"%4.4f %4.4f\" % (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i]) expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1,",
"cosmological quantities k, pk = np.genfromtxt(\"cache_pk.dat\")[:-1].T H, r, D, f = baofisher.background_evolution_splines(cosmo) def",
"\"-\"*40 for i in range(zc.size): # Calculate quantities from Eq. 9.38 of Rohlfs",
"= k**2. * pk * W_tophat(k, Rpix) xi = scipy.integrate.simps(y, k) / (2.",
"expt) print \"%2.2f %4.4f %4.4f %4.4f\" % (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i]) expts =",
"MHz sigma_60 = baofisher.noise_rms_per_voxel(zc, expt2) # Output data print \"\" print \"-\"*40 print",
"baofisher.background_evolution_splines(cosmo) def W_tophat(k, r): return 3. * ( np.sin(k * r) - k",
"= 100.*(D2RAD)**2. sigma_T = baofisher.noise_rms_per_voxel_interferom(zc, expt) expt['dnu'] = 60. # 60 MHz sigma_60",
"= baofisher.noise_rms_per_voxel_interferom(zc, expt) #n_x = load_interferom_file(expt['n(x)']) #x = u / nu # x",
"label=\"$T_b(z)$\") P.plot(z, sigma_HI*1e3, lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z, sigma_T*1e3, lw=1.4, label=\"$\\sigma_T(z)$\") P.plot([0.5, 1., 1.5, 2.],",
"#sigma_HI = np.array([calculate_rms(zz, expt) for zz in z]) # Output noise per voxel",
"data print \"\" print \"-\"*40 print names[j], \"(INTERFEROMETER)\" print \"-\"*40 print \" zc",
"in range(len(expts)): expt = expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu =",
"= 60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground sky signal (mK) Tsys = expt['Tinst'] +",
"expt['dnu'] / nu21 Rpix = Vpix**(1./3.) # Integrate P(k) to get correlation fn.",
"20.9, 16.4], 'go') # rms Tb, from SKA RFC P.xlabel(\"z\") P.ylabel(\"uK\") P.legend(loc='upper left')",
"* r * np.cos(k * r) ) / ((k * r)**3.) def calculate_rms(z,",
"W_tophat(k, Rpix) xi = scipy.integrate.simps(y, k) / (2. * np.pi**2.) # Return rms",
"in a ball, xi(Rpix) y = k**2. * pk * W_tophat(k, Rpix) xi",
"(1. + z) / (1e6 * expt['nu_line']) / expt['Ddish'] # Beam FWHM rnu",
"Calculate sigma_HI for a range of redshift #z = np.linspace(1e-2, 3., 100) #Tb",
"= 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc, expt) #n_x = load_interferom_file(expt['n(x)']) #x",
"* nu^2 # Output data print \"\" print \"-\"*40 print names[j], \"(INTERFEROMETER)\" print",
"Ddish = expt['Ddish'] Tsky = 60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground sky signal (mK)",
"9.38 of Rohlfs & Wilson (5th Ed.) l = 3e8 * (1. +",
"P.plot(z, Tb*1e3, lw=1.4, label=\"$T_b(z)$\") P.plot(z, sigma_HI*1e3, lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z, sigma_T*1e3, lw=1.4, label=\"$\\sigma_T(z)$\") P.plot([0.5,",
"expt['nu_line']) / expt['Ddish'] # Beam FWHM rnu = C * (1.+z)**2. / H(z)",
"quantities k, pk = np.genfromtxt(\"cache_pk.dat\")[:-1].T H, r, D, f = baofisher.background_evolution_splines(cosmo) def W_tophat(k,",
"MHz) / (60 MHz)\" % dnu print \"-\"*40 for i in range(zc.size): #",
"u / nu # x = u / (freq [MHz]) #n_u = n_x(x)",
"* (1. + z) / (1e6 * expt['nu_line']) / expt['Ddish'] # Beam FWHM",
"baofisher import experiments from experiments import cosmo from units import * import copy",
"expt = expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] sigma_T",
"(2. * np.pi**2.) # Return rms HI fluctuation return Tb * D(z) *",
"f = baofisher.background_evolution_splines(cosmo) def W_tophat(k, r): return 3. * ( np.sin(k * r)",
"( np.sin(k * r) - k * r * np.cos(k * r) )",
"range(zc.size): # Calculate quantities from Eq. 9.38 of Rohlfs & Wilson (5th Ed.)",
"Calculate quantities from Eq. 9.38 of Rohlfs & Wilson (5th Ed.) l =",
"= baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] expt['Sarea'] = 100.*(D2RAD)**2. sigma_T = baofisher.noise_rms_per_voxel_interferom(zc,",
"expt['dnu'] = 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc, expt) #n_x = load_interferom_file(expt['n(x)'])",
"expt) for zz in z]) # Output noise per voxel (single-dish) for j",
"exit() # Plot results P.subplot(111) P.plot(z, Tb*1e3, lw=1.4, label=\"$T_b(z)$\") P.plot(z, sigma_HI*1e3, lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\")",
"baofisher.Tb(z, cosmo) #sigma_HI = np.array([calculate_rms(zz, expt) for zz in z]) # Output noise",
"(300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground sky signal (mK) Tsys = expt['Tinst'] + Tsky #sigma_HI =",
"print \" -- / -- / (%2.2f MHz) / (60 MHz)\" % dnu",
"1e3*sigma_T[i], 1e3*sigma_60[i]) expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2 ] names = [",
"] # Calculate sigma_HI for a range of redshift #z = np.linspace(1e-2, 3.,",
"60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc, expt) #n_x = load_interferom_file(expt['n(x)']) #x =",
"y = k**2. * pk * W_tophat(k, Rpix) xi = scipy.integrate.simps(y, k) /",
"of redshift #z = np.linspace(1e-2, 3., 100) #Tb = baofisher.Tb(z, cosmo) #sigma_HI =",
"zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] sigma_T = baofisher.noise_rms_per_voxel(zc, expt)",
"/ Tsys [K]\" print \" -- / -- / (%2.2f MHz) / (60",
"z) / (1e6 * expt['nu_line']) / expt['Ddish'] # Beam FWHM rnu = C",
"noise per voxel (single-dish) for j in range(len(expts)): expt = expts[j] zs, zc",
"noise per voxel (interferom.) for j in range(len(expts)): expt = expts[j] zs, zc",
"lw=1.4, label=\"$T_b(z)$\") P.plot(z, sigma_HI*1e3, lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z, sigma_T*1e3, lw=1.4, label=\"$\\sigma_T(z)$\") P.plot([0.5, 1., 1.5,",
"%4.4f %8.8f %8.8f %4.4f %4.4f\" % \\ (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i], l, Tsys/1e3)",
"print \"\" print \"-\"*40 print names[j], \"(INTERFEROMETER)\" print \"-\"*40 print \" zc /",
"nu21 Rpix = Vpix**(1./3.) # Integrate P(k) to get correlation fn. averaged in",
"& Wilson (5th Ed.) l = 3e8 * (1. + zc[i]) / 1420.e6",
"= expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] sigma_T =",
"# Choose experiment e = experiments expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2,",
"Tb = baofisher.Tb(z, cosmo) bHI = 1. #baofisher.bias_HI(z, cosmo) # Calculate pixel volume",
"sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc, expt) #n_x = load_interferom_file(expt['n(x)']) #x = u / nu #",
"# Output noise per voxel (interferom.) for j in range(len(expts)): expt = expts[j]",
"# in mK # Choose experiment e = experiments expts = [ e.SKA1MIDbase1,",
"* theta_b)**2. * rnu * expt['dnu'] / nu21 Rpix = Vpix**(1./3.) # Integrate",
"/ nu**2. # n(x) = n(u) * nu^2 # Output data print \"\"",
"245.6, 260.8], 'bo') # mean Tb, from SKA RFC P.plot([0.5, 1., 1.5, 2.],",
"1., 1.5, 2.], [40.1, 28.0, 20.9, 16.4], 'go') # rms Tb, from SKA",
"baofisher.Tb(z, cosmo) bHI = 1. #baofisher.bias_HI(z, cosmo) # Calculate pixel volume at given",
"expt = expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] expt['Sarea']",
"60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground sky signal (mK) Tsys = expt['Tinst'] + Tsky",
"theta_b)**2. * rnu * expt['dnu'] / nu21 Rpix = Vpix**(1./3.) # Integrate P(k)",
"/ (2. * np.pi**2.) # Return rms HI fluctuation return Tb * D(z)",
"* r) - k * r * np.cos(k * r) ) / ((k",
"\"-\"*40 for i in range(zc.size): #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %4.4f",
"= experiments expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2, e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2",
"in range(zc.size): # Calculate quantities from Eq. 9.38 of Rohlfs & Wilson (5th",
"sqrt[n(u)] * sigma_T [uK] / sqrt[n(u)] * sigma_T [uK] / lambda [m] /",
"= scipy.integrate.simps(y, k) / (2. * np.pi**2.) # Return rms HI fluctuation return",
"r) - k * r * np.cos(k * r) ) / ((k *",
"= expt['dnu'] expt['Sarea'] = 100.*(D2RAD)**2. sigma_T = baofisher.noise_rms_per_voxel_interferom(zc, expt) expt['dnu'] = 60. #",
"bHI = 1. #baofisher.bias_HI(z, cosmo) # Calculate pixel volume at given redshift Vpix",
"a given redshift. \"\"\" theta_b = 3e8 * (1. + z) / (1e6",
"import cosmo from units import * import copy nu21 = 1420. # Line",
"/ -- / (%2.2f MHz) / (60 MHz)\" % dnu print \"-\"*40 for",
"expt['Ddish'] Tsky = 60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground sky signal (mK) Tsys =",
"frequency at z=0 # Pre-calculate cosmological quantities k, pk = np.genfromtxt(\"cache_pk.dat\")[:-1].T H, r,",
"expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] sigma_T = baofisher.noise_rms_per_voxel(zc,",
"e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2' ] #",
"cosmo) bHI = 1. #baofisher.bias_HI(z, cosmo) # Calculate pixel volume at given redshift",
"= [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2, e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2 ] names =",
"sqrt[n(u)] * sigma_T [uK] / lambda [m] / Tsys [K]\" print \" --",
"nu^2 # Output data print \"\" print \"-\"*40 print names[j], \"(INTERFEROMETER)\" print \"-\"*40",
"= Vpix**(1./3.) # Integrate P(k) to get correlation fn. averaged in a ball,",
"l, Tsys/1e3) exit() # Plot results P.subplot(111) P.plot(z, Tb*1e3, lw=1.4, label=\"$T_b(z)$\") P.plot(z, sigma_HI*1e3,",
"'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2', 'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2' ] # Calculate sigma_HI for",
"print \" zc / dz / sigma_T [uK] / sigma_T [uK]\" print \"",
"rms HI fluctuation return Tb * D(z) * bHI * np.sqrt(xi) # in",
"/ ((k * r)**3.) def calculate_rms(z, expt): \"\"\" Calculate RMS of HI signal",
"Calculate pixel volume at given redshift Vpix = (r(z) * theta_b)**2. * rnu",
"expt) #n_x = load_interferom_file(expt['n(x)']) #x = u / nu # x = u",
"def calculate_rms(z, expt): \"\"\" Calculate RMS of HI signal at a given redshift.",
"# 60 MHz sigma_60 = baofisher.noise_rms_per_voxel(zc, expt2) # Output data print \"\" print",
"k, pk = np.genfromtxt(\"cache_pk.dat\")[:-1].T H, r, D, f = baofisher.background_evolution_splines(cosmo) def W_tophat(k, r):",
"= [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2', 'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2' ] # Calculate",
"expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2, e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2 ] names",
"import copy nu21 = 1420. # Line frequency at z=0 # Pre-calculate cosmological",
"voxel (single-dish) for j in range(len(expts)): expt = expts[j] zs, zc = baofisher.zbins_const_dnu(expt,",
"Choose experiment e = experiments expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2, e.SKA1SURbase1,",
"(1. + zc[i]) / 1420.e6 Ddish = expt['Ddish'] Tsky = 60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55",
"sigma_T = baofisher.noise_rms_per_voxel(zc, expt) expt2 = copy.copy(expt) expt2['dnu'] = 60. # 60 MHz",
"names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2' ] # Output noise per voxel",
"units import * import copy nu21 = 1420. # Line frequency at z=0",
"for j in range(len(expts)): expt = expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.)",
"= calculate_rms(zc[i], expt) print \"%2.2f %4.4f %4.4f %4.4f\" % (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i])",
"print names[j], \"(INTERFEROMETER)\" print \"-\"*40 print \" zc / dz / sqrt[n(u)] *",
"(freq [MHz]) #n_u = n_x(x) / nu**2. # n(x) = n(u) * nu^2",
"nu # x = u / (freq [MHz]) #n_u = n_x(x) / nu**2.",
"[K]\" print \" -- / -- / (%2.2f MHz) / (60 MHz)\" %",
"sigma_T [uK] / sigma_T [uK]\" print \" -- / -- / (%2.2f MHz)",
"[MHz]) #n_u = n_x(x) / nu**2. # n(x) = n(u) * nu^2 #",
"* sigma_T [uK] / lambda [m] / Tsys [K]\" print \" -- /",
"import baofisher import experiments from experiments import cosmo from units import * import",
"get correlation fn. averaged in a ball, xi(Rpix) y = k**2. * pk",
"copy nu21 = 1420. # Line frequency at z=0 # Pre-calculate cosmological quantities",
"] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2', 'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2' ]",
"cosmo, dnu=60.) dnu = expt['dnu'] expt['Sarea'] = 100.*(D2RAD)**2. sigma_T = baofisher.noise_rms_per_voxel_interferom(zc, expt) expt['dnu']",
"FWHM rnu = C * (1.+z)**2. / H(z) Tb = baofisher.Tb(z, cosmo) bHI",
"expt2['dnu'] = 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel(zc, expt2) # Output data",
"expt['dnu'] expt['Sarea'] = 100.*(D2RAD)**2. sigma_T = baofisher.noise_rms_per_voxel_interferom(zc, expt) expt['dnu'] = 60. # 60",
"= baofisher.background_evolution_splines(cosmo) def W_tophat(k, r): return 3. * ( np.sin(k * r) -",
"Integrate P(k) to get correlation fn. averaged in a ball, xi(Rpix) y =",
"np.linspace(1e-2, 3., 100) #Tb = baofisher.Tb(z, cosmo) #sigma_HI = np.array([calculate_rms(zz, expt) for zz",
"\"(INTERFEROMETER)\" print \"-\"*40 print \" zc / dz / sqrt[n(u)] * sigma_T [uK]",
"260.8], 'bo') # mean Tb, from SKA RFC P.plot([0.5, 1., 1.5, 2.], [40.1,",
"+ Tsky #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %8.8f %8.8f %4.4f %4.4f\"",
"r): return 3. * ( np.sin(k * r) - k * r *",
"%4.4f\" % (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i]) expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2",
"signal at a given redshift. \"\"\" theta_b = 3e8 * (1. + z)",
"\"\" print \"-\"*40 print names[j], \"(INTERFEROMETER)\" print \"-\"*40 print \" zc / dz",
"# Pre-calculate cosmological quantities k, pk = np.genfromtxt(\"cache_pk.dat\")[:-1].T H, r, D, f =",
"e.SKA1SURfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2', 'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2'",
"* (1. + zc[i]) / 1420.e6 Ddish = expt['Ddish'] Tsky = 60e3 *",
"= np.genfromtxt(\"cache_pk.dat\")[:-1].T H, r, D, f = baofisher.background_evolution_splines(cosmo) def W_tophat(k, r): return 3.",
"Tsys/1e3) exit() # Plot results P.subplot(111) P.plot(z, Tb*1e3, lw=1.4, label=\"$T_b(z)$\") P.plot(z, sigma_HI*1e3, lw=1.4,",
"np.genfromtxt(\"cache_pk.dat\")[:-1].T H, r, D, f = baofisher.background_evolution_splines(cosmo) def W_tophat(k, r): return 3. *",
"dnu=60.) dnu = expt['dnu'] expt['Sarea'] = 100.*(D2RAD)**2. sigma_T = baofisher.noise_rms_per_voxel_interferom(zc, expt) expt['dnu'] =",
"\"\"\" Calculate RMS of HI signal at a given redshift. \"\"\" theta_b =",
"at z=0 # Pre-calculate cosmological quantities k, pk = np.genfromtxt(\"cache_pk.dat\")[:-1].T H, r, D,",
"1.5, 2.], [40.1, 28.0, 20.9, 16.4], 'go') # rms Tb, from SKA RFC",
"= copy.copy(expt) expt2['dnu'] = 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel(zc, expt2) #",
"H, r, D, f = baofisher.background_evolution_splines(cosmo) def W_tophat(k, r): return 3. * (",
"= baofisher.Tb(z, cosmo) bHI = 1. #baofisher.bias_HI(z, cosmo) # Calculate pixel volume at",
"expts[j] zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.) dnu = expt['dnu'] expt['Sarea'] = 100.*(D2RAD)**2.",
"print \"-\"*40 print names[j] print \"-\"*40 print \" zc / dz / sigma_T",
"print \"-\"*40 for i in range(zc.size): #sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f",
"power as a function of frequency. \"\"\" import numpy as np import pylab",
"[40.1, 28.0, 20.9, 16.4], 'go') # rms Tb, from SKA RFC P.xlabel(\"z\") P.ylabel(\"uK\")",
"dnu = expt['dnu'] sigma_T = baofisher.noise_rms_per_voxel(zc, expt) expt2 = copy.copy(expt) expt2['dnu'] = 60.",
"#sigma_HI = calculate_rms(zc[i], expt) print \"%2.2f %4.4f %8.8f %8.8f %4.4f %4.4f\" % \\",
"Tb*1e3, lw=1.4, label=\"$T_b(z)$\") P.plot(z, sigma_HI*1e3, lw=1.4, label=\"$\\sigma_\\mathrm{HI}(z)$\") P.plot(z, sigma_T*1e3, lw=1.4, label=\"$\\sigma_T(z)$\") P.plot([0.5, 1.,",
"Beam FWHM rnu = C * (1.+z)**2. / H(z) Tb = baofisher.Tb(z, cosmo)",
"* (300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground sky signal (mK) Tsys = expt['Tinst'] + Tsky #sigma_HI",
"sigma_60 = baofisher.noise_rms_per_voxel(zc, expt2) # Output data print \"\" print \"-\"*40 print names[j]",
"'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2' ] # Calculate sigma_HI for a range of redshift #z",
"np.array([calculate_rms(zz, expt) for zz in z]) # Output noise per voxel (single-dish) for",
"in z]) # Output noise per voxel (single-dish) for j in range(len(expts)): expt",
"sigma_T [uK] / lambda [m] / Tsys [K]\" print \" -- / --",
"dz / sigma_T [uK] / sigma_T [uK]\" print \" -- / -- /",
"\"%2.2f %4.4f %4.4f %4.4f\" % (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i]) expts = [ e.SKA1MIDbase1,",
"+ zc[i]) / 1420.e6 Ddish = expt['Ddish'] Tsky = 60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55 #",
"e.SKA1MIDfull2 ] names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2' ] # Output noise",
"of frequency. \"\"\" import numpy as np import pylab as P import scipy.integrate",
"* D(z) * bHI * np.sqrt(xi) # in mK # Choose experiment e",
"lambda [m] / Tsys [K]\" print \" -- / -- / (%2.2f MHz)",
"pylab as P import scipy.integrate import baofisher import experiments from experiments import cosmo",
"frequency. \"\"\" import numpy as np import pylab as P import scipy.integrate import",
"expt) expt['dnu'] = 60. # 60 MHz sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc, expt) #n_x =",
"= 1. #baofisher.bias_HI(z, cosmo) # Calculate pixel volume at given redshift Vpix =",
"'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2' ] # Output noise per voxel (interferom.) for j",
"[ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2, e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2 ] names = [",
"((k * r)**3.) def calculate_rms(z, expt): \"\"\" Calculate RMS of HI signal at"
] |
[
"# setup: __version__ = \"0.0.4-alpha\" __changelog__ = \"\"\"changelog: 0.0.4-alpha: - moved individual parsers",
"# Notes: # this should be imported as a python module using 'from",
"as BNF from .RegexInplaceParser import RegexInplaceParser as RegexInplaceParser from .Hybrid import HybridExpressionParser from",
"python module using 'from tra_analysis.Equation import parser' # setup: __version__ = \"0.0.4-alpha\" __changelog__",
"using 'from tra_analysis.Equation import parser' # setup: __version__ = \"0.0.4-alpha\" __changelog__ = \"\"\"changelog:",
"readded old regex based parser as RegexInplaceParser 0.0.2-alpha: - wrote BNF using pyparsing",
"BNF using pyparsing and uses a BNF metasyntax - renamed this submodule parser",
"import BNF as BNF from .RegexInplaceParser import RegexInplaceParser as RegexInplaceParser from .Hybrid import",
"and ported here \"\"\" __author__ = ( \"<NAME> <<EMAIL>>\", ) __all__ = {",
"individual parsers to their own files 0.0.3-alpha: - readded old regex based parser",
") __all__ = { \"BNF\", \"RegexInplaceParser\", \"HybridExpressionParser\" } from .BNF import BNF as",
"a python module using 'from tra_analysis.Equation import parser' # setup: __version__ = \"0.0.4-alpha\"",
"setup: __version__ = \"0.0.4-alpha\" __changelog__ = \"\"\"changelog: 0.0.4-alpha: - moved individual parsers to",
".BNF import BNF as BNF from .RegexInplaceParser import RegexInplaceParser as RegexInplaceParser from .Hybrid",
"tra_analysis.Equation import parser' # setup: __version__ = \"0.0.4-alpha\" __changelog__ = \"\"\"changelog: 0.0.4-alpha: -",
"be imported as a python module using 'from tra_analysis.Equation import parser' # setup:",
"<NAME> # Notes: # this should be imported as a python module using",
"'from tra_analysis.Equation import parser' # setup: __version__ = \"0.0.4-alpha\" __changelog__ = \"\"\"changelog: 0.0.4-alpha:",
"as RegexInplaceParser 0.0.2-alpha: - wrote BNF using pyparsing and uses a BNF metasyntax",
".RegexInplaceParser import RegexInplaceParser as RegexInplaceParser from .Hybrid import HybridExpressionParser from .Hybrid_Utils import equation_base,",
"__all__ = { \"BNF\", \"RegexInplaceParser\", \"HybridExpressionParser\" } from .BNF import BNF as BNF",
"by <NAME> # Notes: # this should be imported as a python module",
"to their own files 0.0.3-alpha: - readded old regex based parser as RegexInplaceParser",
"parser as RegexInplaceParser 0.0.2-alpha: - wrote BNF using pyparsing and uses a BNF",
"moved individual parsers to their own files 0.0.3-alpha: - readded old regex based",
"from .BNF import BNF as BNF from .RegexInplaceParser import RegexInplaceParser as RegexInplaceParser from",
"parser 0.0.1-alpha: - took items from equation.ipynb and ported here \"\"\" __author__ =",
"{ \"BNF\", \"RegexInplaceParser\", \"HybridExpressionParser\" } from .BNF import BNF as BNF from .RegexInplaceParser",
"= ( \"<NAME> <<EMAIL>>\", ) __all__ = { \"BNF\", \"RegexInplaceParser\", \"HybridExpressionParser\" } from",
"\"RegexInplaceParser\", \"HybridExpressionParser\" } from .BNF import BNF as BNF from .RegexInplaceParser import RegexInplaceParser",
"Team 2022: Expression submodule # Written by <NAME> # Notes: # this should",
"import parser' # setup: __version__ = \"0.0.4-alpha\" __changelog__ = \"\"\"changelog: 0.0.4-alpha: - moved",
"submodule # Written by <NAME> # Notes: # this should be imported as",
"\"\"\" __author__ = ( \"<NAME> <<EMAIL>>\", ) __all__ = { \"BNF\", \"RegexInplaceParser\", \"HybridExpressionParser\"",
"- took items from equation.ipynb and ported here \"\"\" __author__ = ( \"<NAME>",
"\"0.0.4-alpha\" __changelog__ = \"\"\"changelog: 0.0.4-alpha: - moved individual parsers to their own files",
"renamed this submodule parser 0.0.1-alpha: - took items from equation.ipynb and ported here",
"- moved individual parsers to their own files 0.0.3-alpha: - readded old regex",
"pyparsing and uses a BNF metasyntax - renamed this submodule parser 0.0.1-alpha: -",
"own files 0.0.3-alpha: - readded old regex based parser as RegexInplaceParser 0.0.2-alpha: -",
"from .RegexInplaceParser import RegexInplaceParser as RegexInplaceParser from .Hybrid import HybridExpressionParser from .Hybrid_Utils import",
"ported here \"\"\" __author__ = ( \"<NAME> <<EMAIL>>\", ) __all__ = { \"BNF\",",
"files 0.0.3-alpha: - readded old regex based parser as RegexInplaceParser 0.0.2-alpha: - wrote",
"based parser as RegexInplaceParser 0.0.2-alpha: - wrote BNF using pyparsing and uses a",
"# Written by <NAME> # Notes: # this should be imported as a",
"__changelog__ = \"\"\"changelog: 0.0.4-alpha: - moved individual parsers to their own files 0.0.3-alpha:",
"- renamed this submodule parser 0.0.1-alpha: - took items from equation.ipynb and ported",
"Titan Robotics Team 2022: Expression submodule # Written by <NAME> # Notes: #",
"submodule parser 0.0.1-alpha: - took items from equation.ipynb and ported here \"\"\" __author__",
"took items from equation.ipynb and ported here \"\"\" __author__ = ( \"<NAME> <<EMAIL>>\",",
"parser' # setup: __version__ = \"0.0.4-alpha\" __changelog__ = \"\"\"changelog: 0.0.4-alpha: - moved individual",
"import RegexInplaceParser as RegexInplaceParser from .Hybrid import HybridExpressionParser from .Hybrid_Utils import equation_base, Core",
"Robotics Team 2022: Expression submodule # Written by <NAME> # Notes: # this",
"} from .BNF import BNF as BNF from .RegexInplaceParser import RegexInplaceParser as RegexInplaceParser",
"metasyntax - renamed this submodule parser 0.0.1-alpha: - took items from equation.ipynb and",
"from equation.ipynb and ported here \"\"\" __author__ = ( \"<NAME> <<EMAIL>>\", ) __all__",
"equation.ipynb and ported here \"\"\" __author__ = ( \"<NAME> <<EMAIL>>\", ) __all__ =",
"wrote BNF using pyparsing and uses a BNF metasyntax - renamed this submodule",
"uses a BNF metasyntax - renamed this submodule parser 0.0.1-alpha: - took items",
"here \"\"\" __author__ = ( \"<NAME> <<EMAIL>>\", ) __all__ = { \"BNF\", \"RegexInplaceParser\",",
"\"<NAME> <<EMAIL>>\", ) __all__ = { \"BNF\", \"RegexInplaceParser\", \"HybridExpressionParser\" } from .BNF import",
"BNF from .RegexInplaceParser import RegexInplaceParser as RegexInplaceParser from .Hybrid import HybridExpressionParser from .Hybrid_Utils",
"- wrote BNF using pyparsing and uses a BNF metasyntax - renamed this",
"imported as a python module using 'from tra_analysis.Equation import parser' # setup: __version__",
"0.0.4-alpha: - moved individual parsers to their own files 0.0.3-alpha: - readded old",
"0.0.1-alpha: - took items from equation.ipynb and ported here \"\"\" __author__ = (",
"old regex based parser as RegexInplaceParser 0.0.2-alpha: - wrote BNF using pyparsing and",
"\"HybridExpressionParser\" } from .BNF import BNF as BNF from .RegexInplaceParser import RegexInplaceParser as",
"this submodule parser 0.0.1-alpha: - took items from equation.ipynb and ported here \"\"\"",
"<<EMAIL>>\", ) __all__ = { \"BNF\", \"RegexInplaceParser\", \"HybridExpressionParser\" } from .BNF import BNF",
"using pyparsing and uses a BNF metasyntax - renamed this submodule parser 0.0.1-alpha:",
"Written by <NAME> # Notes: # this should be imported as a python",
"__version__ = \"0.0.4-alpha\" __changelog__ = \"\"\"changelog: 0.0.4-alpha: - moved individual parsers to their",
"module using 'from tra_analysis.Equation import parser' # setup: __version__ = \"0.0.4-alpha\" __changelog__ =",
"this should be imported as a python module using 'from tra_analysis.Equation import parser'",
"a BNF metasyntax - renamed this submodule parser 0.0.1-alpha: - took items from",
"2022: Expression submodule # Written by <NAME> # Notes: # this should be",
"as a python module using 'from tra_analysis.Equation import parser' # setup: __version__ =",
"- readded old regex based parser as RegexInplaceParser 0.0.2-alpha: - wrote BNF using",
"Expression submodule # Written by <NAME> # Notes: # this should be imported",
"parsers to their own files 0.0.3-alpha: - readded old regex based parser as",
"items from equation.ipynb and ported here \"\"\" __author__ = ( \"<NAME> <<EMAIL>>\", )",
"RegexInplaceParser 0.0.2-alpha: - wrote BNF using pyparsing and uses a BNF metasyntax -",
"and uses a BNF metasyntax - renamed this submodule parser 0.0.1-alpha: - took",
"should be imported as a python module using 'from tra_analysis.Equation import parser' #",
"# Titan Robotics Team 2022: Expression submodule # Written by <NAME> # Notes:",
"regex based parser as RegexInplaceParser 0.0.2-alpha: - wrote BNF using pyparsing and uses",
"= \"\"\"changelog: 0.0.4-alpha: - moved individual parsers to their own files 0.0.3-alpha: -",
"Notes: # this should be imported as a python module using 'from tra_analysis.Equation",
"\"BNF\", \"RegexInplaceParser\", \"HybridExpressionParser\" } from .BNF import BNF as BNF from .RegexInplaceParser import",
"BNF as BNF from .RegexInplaceParser import RegexInplaceParser as RegexInplaceParser from .Hybrid import HybridExpressionParser",
"# this should be imported as a python module using 'from tra_analysis.Equation import",
"BNF metasyntax - renamed this submodule parser 0.0.1-alpha: - took items from equation.ipynb",
"0.0.2-alpha: - wrote BNF using pyparsing and uses a BNF metasyntax - renamed",
"= \"0.0.4-alpha\" __changelog__ = \"\"\"changelog: 0.0.4-alpha: - moved individual parsers to their own",
"__author__ = ( \"<NAME> <<EMAIL>>\", ) __all__ = { \"BNF\", \"RegexInplaceParser\", \"HybridExpressionParser\" }",
"= { \"BNF\", \"RegexInplaceParser\", \"HybridExpressionParser\" } from .BNF import BNF as BNF from",
"0.0.3-alpha: - readded old regex based parser as RegexInplaceParser 0.0.2-alpha: - wrote BNF",
"\"\"\"changelog: 0.0.4-alpha: - moved individual parsers to their own files 0.0.3-alpha: - readded",
"( \"<NAME> <<EMAIL>>\", ) __all__ = { \"BNF\", \"RegexInplaceParser\", \"HybridExpressionParser\" } from .BNF",
"their own files 0.0.3-alpha: - readded old regex based parser as RegexInplaceParser 0.0.2-alpha:"
] |
[
"\"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) self.assertIsNotNone(uow.users.get('id1234')) def",
"FakeAuthService() self.committed = False def _commit(self): self.committed = True def rollback(self): pass def",
"return next((u for u in self._users if u.id == id), None) def get_by_email(self,",
"if u.email == email), None) def list(self): return self._users class FakeAuthService: def verify_password(self,",
"AuthService class FakeRepository: def __init__(self, users: List[model.User]): self._users = set(users) self.seen = set()",
"= commands.AddAuthorizationToUser(\"id1234\", app_auth_1) handlers.add_app_auth_to_user(command, uow) app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_2) handlers.add_app_auth_to_user(command,",
"commands.AddAuthorizationToUser(\"id1234\", app_auth_1) handlers.add_app_auth_to_user(command, uow) app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_2) handlers.add_app_auth_to_user(command, uow)",
"user: model.User): self._users.add(user) def get(self, id: str): return next((u for u in self._users",
"model.User): self._users.add(user) def get(self, id: str): return next((u for u in self._users if",
"__init__(self, users: List[model.User]): self._users = set(users) self.seen = set() def add(self, user: model.User):",
"next((u for u in self._users if u.email == email), None) def list(self): return",
"= True def rollback(self): pass def bootstrap_test_app(): return bootstrap.bootstrap( start_orm=False, uow=FakeUnitOfWork(), ) class",
"== hash(user.password) def encrypt_password(self, password: str) -> str: return hash(password) def generate_token(self, password:",
"def verify_password(self, password: str, user: model.User) -> bool: return hash(password) == hash(user.password) def",
"uow) app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_2) handlers.add_app_auth_to_user(command, uow) user = uow.users.get('id1234')",
"u in self._users if u.email == email), None) def list(self): return self._users class",
"user = uow.users.get('id1234') self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2}) def test_get_token_must_return_token(self): uow = bootstrap_test_app() command =",
"= False def _commit(self): self.committed = True def rollback(self): pass def bootstrap_test_app(): return",
"bootstrap.bootstrap( start_orm=False, uow=FakeUnitOfWork(), ) class TestHandlers(TestCase): def test_create_user_must_create_user(self): uow = bootstrap_test_app() command =",
"self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2}) def test_get_token_must_return_token(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\",",
"= commands.AddAuthorizationToUser(\"id1234\", app_auth_2) handlers.add_app_auth_to_user(command, uow) user = uow.users.get('id1234') self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2}) def test_get_token_must_return_token(self):",
"set(users) self.seen = set() def add(self, user: model.User): self._users.add(user) def get(self, id: str):",
"email), None) def list(self): return self._users class FakeAuthService: def verify_password(self, password: str, user:",
"lambda *args: None) self.assertIsNotNone(uow.users.get('id1234')) def test_add_app_auth_to_user_must_return(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\",",
"app_auth_2) handlers.add_app_auth_to_user(command, uow) user = uow.users.get('id1234') self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2}) def test_get_token_must_return_token(self): uow =",
"hash(password) def generate_token(self, password: str, user: model.User) -> dict: return password def verify_token(self,",
"return hash(password) def generate_token(self, password: str, user: model.User) -> dict: return password def",
"bootstrap from app.domain import commands, model from app.service_layer import handlers from app.service_layer.unit_of_work import",
"class FakeAuthService: def verify_password(self, password: str, user: model.User) -> bool: return hash(password) ==",
"\"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) self.assertIsNotNone(uow.users.get('id1234')) def test_add_app_auth_to_user_must_return(self): uow = bootstrap_test_app()",
"next((u for u in self._users if u.id == id), None) def get_by_email(self, email:",
"handlers from app.service_layer.unit_of_work import UnitOfWork from app.service_layer.auth_service import AuthService class FakeRepository: def __init__(self,",
") handlers.create_user(command, uow, lambda *args: None) cmd = commands.Authenticate(\"<EMAIL>\", \"secure_password\") token = handlers.get_token(cmd,",
"from app.service_layer import handlers from app.service_layer.unit_of_work import UnitOfWork from app.service_layer.auth_service import AuthService class",
"app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_2) handlers.add_app_auth_to_user(command, uow) user = uow.users.get('id1234') self.assertSetEqual(user._applications_auth,",
"app.domain import commands, model from app.service_layer import handlers from app.service_layer.unit_of_work import UnitOfWork from",
"generate_token(self, password: str, user: model.User) -> dict: return password def verify_token(self, token: str)",
"in self._users if u.id == id), None) def get_by_email(self, email: str): return next((u",
"token class FakeUnitOfWork(UnitOfWork): def __init__(self): self.users: AbstractUserRepository = FakeRepository([]) self.auth_service = FakeAuthService() self.committed",
"self._users = set(users) self.seen = set() def add(self, user: model.User): self._users.add(user) def get(self,",
"lambda *args: None) app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_1) handlers.add_app_auth_to_user(command, uow) app_auth_2",
"in self._users if u.email == email), None) def list(self): return self._users class FakeAuthService:",
"\"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) self.assertIsNotNone(uow.users.get('id1234')) def test_add_app_auth_to_user_must_return(self):",
"from app.service_layer.unit_of_work import UnitOfWork from app.service_layer.auth_service import AuthService class FakeRepository: def __init__(self, users:",
"= set(users) self.seen = set() def add(self, user: model.User): self._users.add(user) def get(self, id:",
"str) -> bool: return token class FakeUnitOfWork(UnitOfWork): def __init__(self): self.users: AbstractUserRepository = FakeRepository([])",
"str): return next((u for u in self._users if u.id == id), None) def",
"model from app.service_layer import handlers from app.service_layer.unit_of_work import UnitOfWork from app.service_layer.auth_service import AuthService",
"= bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow,",
"def encrypt_password(self, password: str) -> str: return hash(password) def generate_token(self, password: str, user:",
"commands.Authenticate(\"<EMAIL>\", \"secure_password\") token = handlers.get_token(cmd, uow) # TODO: Fake token generation def verify_token_must_return(self):",
"commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) app_auth_1",
"self.auth_service = FakeAuthService() self.committed = False def _commit(self): self.committed = True def rollback(self):",
"-> bool: return hash(password) == hash(user.password) def encrypt_password(self, password: str) -> str: return",
"def list(self): return self._users class FakeAuthService: def verify_password(self, password: str, user: model.User) ->",
"TestHandlers(TestCase): def test_create_user_must_create_user(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\",",
") handlers.create_user(command, uow, lambda *args: None) self.assertIsNotNone(uow.users.get('id1234')) def test_add_app_auth_to_user_must_return(self): uow = bootstrap_test_app() command",
"from unittest import TestCase from typing import List from app import bootstrap from",
"def __init__(self): self.users: AbstractUserRepository = FakeRepository([]) self.auth_service = FakeAuthService() self.committed = False def",
"str) -> str: return hash(password) def generate_token(self, password: str, user: model.User) -> dict:",
"email: str): return next((u for u in self._users if u.email == email), None)",
"get_by_email(self, email: str): return next((u for u in self._users if u.email == email),",
"None) def get_by_email(self, email: str): return next((u for u in self._users if u.email",
"token: str) -> bool: return token class FakeUnitOfWork(UnitOfWork): def __init__(self): self.users: AbstractUserRepository =",
"u in self._users if u.id == id), None) def get_by_email(self, email: str): return",
"u.id == id), None) def get_by_email(self, email: str): return next((u for u in",
"\"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) app_auth_1 =",
"uow, lambda *args: None) app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_1) handlers.add_app_auth_to_user(command, uow)",
"uow=FakeUnitOfWork(), ) class TestHandlers(TestCase): def test_create_user_must_create_user(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\",",
"\"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) self.assertIsNotNone(uow.users.get('id1234')) def test_add_app_auth_to_user_must_return(self): uow",
"return self._users class FakeAuthService: def verify_password(self, password: str, user: model.User) -> bool: return",
"model.User) -> dict: return password def verify_token(self, token: str) -> bool: return token",
"id: str): return next((u for u in self._users if u.id == id), None)",
"def test_get_token_must_return_token(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\"",
"*args: None) cmd = commands.Authenticate(\"<EMAIL>\", \"secure_password\") token = handlers.get_token(cmd, uow) # TODO: Fake",
"rollback(self): pass def bootstrap_test_app(): return bootstrap.bootstrap( start_orm=False, uow=FakeUnitOfWork(), ) class TestHandlers(TestCase): def test_create_user_must_create_user(self):",
"def test_add_app_auth_to_user_must_return(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\"",
"import UnitOfWork from app.service_layer.auth_service import AuthService class FakeRepository: def __init__(self, users: List[model.User]): self._users",
"return token class FakeUnitOfWork(UnitOfWork): def __init__(self): self.users: AbstractUserRepository = FakeRepository([]) self.auth_service = FakeAuthService()",
"def get(self, id: str): return next((u for u in self._users if u.id ==",
"hash(user.password) def encrypt_password(self, password: str) -> str: return hash(password) def generate_token(self, password: str,",
"str: return hash(password) def generate_token(self, password: str, user: model.User) -> dict: return password",
"self.committed = True def rollback(self): pass def bootstrap_test_app(): return bootstrap.bootstrap( start_orm=False, uow=FakeUnitOfWork(), )",
"str): return next((u for u in self._users if u.email == email), None) def",
"password def verify_token(self, token: str) -> bool: return token class FakeUnitOfWork(UnitOfWork): def __init__(self):",
"def generate_token(self, password: str, user: model.User) -> dict: return password def verify_token(self, token:",
"bool: return token class FakeUnitOfWork(UnitOfWork): def __init__(self): self.users: AbstractUserRepository = FakeRepository([]) self.auth_service =",
"if u.id == id), None) def get_by_email(self, email: str): return next((u for u",
"for u in self._users if u.id == id), None) def get_by_email(self, email: str):",
"self.seen = set() def add(self, user: model.User): self._users.add(user) def get(self, id: str): return",
"self._users if u.id == id), None) def get_by_email(self, email: str): return next((u for",
"= model.AppAuthorization(model.App.TIRELIRE_WEB) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_2) handlers.add_app_auth_to_user(command, uow) user = uow.users.get('id1234') self.assertSetEqual(user._applications_auth, {app_auth_1,",
"command = commands.AddAuthorizationToUser(\"id1234\", app_auth_2) handlers.add_app_auth_to_user(command, uow) user = uow.users.get('id1234') self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2}) def",
"\"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) cmd = commands.Authenticate(\"<EMAIL>\", \"secure_password\") token",
"__init__(self): self.users: AbstractUserRepository = FakeRepository([]) self.auth_service = FakeAuthService() self.committed = False def _commit(self):",
"List from app import bootstrap from app.domain import commands, model from app.service_layer import",
"self._users if u.email == email), None) def list(self): return self._users class FakeAuthService: def",
"hash(password) == hash(user.password) def encrypt_password(self, password: str) -> str: return hash(password) def generate_token(self,",
"import AuthService class FakeRepository: def __init__(self, users: List[model.User]): self._users = set(users) self.seen =",
"test_add_app_auth_to_user_must_return(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" )",
"add(self, user: model.User): self._users.add(user) def get(self, id: str): return next((u for u in",
"import handlers from app.service_layer.unit_of_work import UnitOfWork from app.service_layer.auth_service import AuthService class FakeRepository: def",
"def __init__(self, users: List[model.User]): self._users = set(users) self.seen = set() def add(self, user:",
"u.email == email), None) def list(self): return self._users class FakeAuthService: def verify_password(self, password:",
"model.User) -> bool: return hash(password) == hash(user.password) def encrypt_password(self, password: str) -> str:",
"get(self, id: str): return next((u for u in self._users if u.id == id),",
"\"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP) command =",
"{app_auth_1, app_auth_2}) def test_get_token_must_return_token(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\",",
"handlers.create_user(command, uow, lambda *args: None) app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_1) handlers.add_app_auth_to_user(command,",
"handlers.create_user(command, uow, lambda *args: None) cmd = commands.Authenticate(\"<EMAIL>\", \"secure_password\") token = handlers.get_token(cmd, uow)",
"uow.users.get('id1234') self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2}) def test_get_token_must_return_token(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\",",
"== email), None) def list(self): return self._users class FakeAuthService: def verify_password(self, password: str,",
"\"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) cmd =",
"app_auth_2}) def test_get_token_must_return_token(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\",",
"None) cmd = commands.Authenticate(\"<EMAIL>\", \"secure_password\") token = handlers.get_token(cmd, uow) # TODO: Fake token",
"import List from app import bootstrap from app.domain import commands, model from app.service_layer",
"self.committed = False def _commit(self): self.committed = True def rollback(self): pass def bootstrap_test_app():",
"class TestHandlers(TestCase): def test_create_user_must_create_user(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\",",
"FakeAuthService: def verify_password(self, password: str, user: model.User) -> bool: return hash(password) == hash(user.password)",
"List[model.User]): self._users = set(users) self.seen = set() def add(self, user: model.User): self._users.add(user) def",
"app.service_layer import handlers from app.service_layer.unit_of_work import UnitOfWork from app.service_layer.auth_service import AuthService class FakeRepository:",
"FakeUnitOfWork(UnitOfWork): def __init__(self): self.users: AbstractUserRepository = FakeRepository([]) self.auth_service = FakeAuthService() self.committed = False",
"def bootstrap_test_app(): return bootstrap.bootstrap( start_orm=False, uow=FakeUnitOfWork(), ) class TestHandlers(TestCase): def test_create_user_must_create_user(self): uow =",
"test_create_user_must_create_user(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" )",
"bootstrap_test_app(): return bootstrap.bootstrap( start_orm=False, uow=FakeUnitOfWork(), ) class TestHandlers(TestCase): def test_create_user_must_create_user(self): uow = bootstrap_test_app()",
"app import bootstrap from app.domain import commands, model from app.service_layer import handlers from",
"list(self): return self._users class FakeAuthService: def verify_password(self, password: str, user: model.User) -> bool:",
"password: str, user: model.User) -> dict: return password def verify_token(self, token: str) ->",
"def rollback(self): pass def bootstrap_test_app(): return bootstrap.bootstrap( start_orm=False, uow=FakeUnitOfWork(), ) class TestHandlers(TestCase): def",
"True def rollback(self): pass def bootstrap_test_app(): return bootstrap.bootstrap( start_orm=False, uow=FakeUnitOfWork(), ) class TestHandlers(TestCase):",
"= uow.users.get('id1234') self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2}) def test_get_token_must_return_token(self): uow = bootstrap_test_app() command = commands.CreateUser(",
"import commands, model from app.service_layer import handlers from app.service_layer.unit_of_work import UnitOfWork from app.service_layer.auth_service",
"def _commit(self): self.committed = True def rollback(self): pass def bootstrap_test_app(): return bootstrap.bootstrap( start_orm=False,",
"= set() def add(self, user: model.User): self._users.add(user) def get(self, id: str): return next((u",
"commands, model from app.service_layer import handlers from app.service_layer.unit_of_work import UnitOfWork from app.service_layer.auth_service import",
"start_orm=False, uow=FakeUnitOfWork(), ) class TestHandlers(TestCase): def test_create_user_must_create_user(self): uow = bootstrap_test_app() command = commands.CreateUser(",
"from app.service_layer.auth_service import AuthService class FakeRepository: def __init__(self, users: List[model.User]): self._users = set(users)",
"app_auth_1) handlers.add_app_auth_to_user(command, uow) app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_2) handlers.add_app_auth_to_user(command, uow) user",
"uow) user = uow.users.get('id1234') self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2}) def test_get_token_must_return_token(self): uow = bootstrap_test_app() command",
"handlers.add_app_auth_to_user(command, uow) app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_2) handlers.add_app_auth_to_user(command, uow) user =",
"unittest import TestCase from typing import List from app import bootstrap from app.domain",
"set() def add(self, user: model.User): self._users.add(user) def get(self, id: str): return next((u for",
"None) def list(self): return self._users class FakeAuthService: def verify_password(self, password: str, user: model.User)",
"_commit(self): self.committed = True def rollback(self): pass def bootstrap_test_app(): return bootstrap.bootstrap( start_orm=False, uow=FakeUnitOfWork(),",
"dict: return password def verify_token(self, token: str) -> bool: return token class FakeUnitOfWork(UnitOfWork):",
"user: model.User) -> dict: return password def verify_token(self, token: str) -> bool: return",
"handlers.create_user(command, uow, lambda *args: None) self.assertIsNotNone(uow.users.get('id1234')) def test_add_app_auth_to_user_must_return(self): uow = bootstrap_test_app() command =",
"TestCase from typing import List from app import bootstrap from app.domain import commands,",
"self.users: AbstractUserRepository = FakeRepository([]) self.auth_service = FakeAuthService() self.committed = False def _commit(self): self.committed",
"typing import List from app import bootstrap from app.domain import commands, model from",
"= FakeAuthService() self.committed = False def _commit(self): self.committed = True def rollback(self): pass",
"return hash(password) == hash(user.password) def encrypt_password(self, password: str) -> str: return hash(password) def",
"False def _commit(self): self.committed = True def rollback(self): pass def bootstrap_test_app(): return bootstrap.bootstrap(",
"commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) self.assertIsNotNone(uow.users.get('id1234'))",
"command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args:",
"def test_create_user_must_create_user(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\"",
"\"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP) command",
"AbstractUserRepository = FakeRepository([]) self.auth_service = FakeAuthService() self.committed = False def _commit(self): self.committed =",
"None) self.assertIsNotNone(uow.users.get('id1234')) def test_add_app_auth_to_user_must_return(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\",",
"= FakeRepository([]) self.auth_service = FakeAuthService() self.committed = False def _commit(self): self.committed = True",
"FakeRepository([]) self.auth_service = FakeAuthService() self.committed = False def _commit(self): self.committed = True def",
"class FakeUnitOfWork(UnitOfWork): def __init__(self): self.users: AbstractUserRepository = FakeRepository([]) self.auth_service = FakeAuthService() self.committed =",
"commands.AddAuthorizationToUser(\"id1234\", app_auth_2) handlers.add_app_auth_to_user(command, uow) user = uow.users.get('id1234') self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2}) def test_get_token_must_return_token(self): uow",
"handlers.add_app_auth_to_user(command, uow) user = uow.users.get('id1234') self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2}) def test_get_token_must_return_token(self): uow = bootstrap_test_app()",
"from typing import List from app import bootstrap from app.domain import commands, model",
"*args: None) self.assertIsNotNone(uow.users.get('id1234')) def test_add_app_auth_to_user_must_return(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\",",
"app.service_layer.unit_of_work import UnitOfWork from app.service_layer.auth_service import AuthService class FakeRepository: def __init__(self, users: List[model.User]):",
"return bootstrap.bootstrap( start_orm=False, uow=FakeUnitOfWork(), ) class TestHandlers(TestCase): def test_create_user_must_create_user(self): uow = bootstrap_test_app() command",
"uow, lambda *args: None) self.assertIsNotNone(uow.users.get('id1234')) def test_add_app_auth_to_user_must_return(self): uow = bootstrap_test_app() command = commands.CreateUser(",
"self._users.add(user) def get(self, id: str): return next((u for u in self._users if u.id",
"test_get_token_must_return_token(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" )",
"verify_password(self, password: str, user: model.User) -> bool: return hash(password) == hash(user.password) def encrypt_password(self,",
"<reponame>AgRenaud/tirelire from unittest import TestCase from typing import List from app import bootstrap",
"== id), None) def get_by_email(self, email: str): return next((u for u in self._users",
"\"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) self.assertIsNotNone(uow.users.get('id1234')) def test_add_app_auth_to_user_must_return(self): uow =",
"= model.AppAuthorization(model.App.TIRELIRE_APP) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_1) handlers.add_app_auth_to_user(command, uow) app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB) command =",
"class FakeRepository: def __init__(self, users: List[model.User]): self._users = set(users) self.seen = set() def",
"\"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) cmd = commands.Authenticate(\"<EMAIL>\", \"secure_password\") token =",
"return next((u for u in self._users if u.email == email), None) def list(self):",
"lambda *args: None) cmd = commands.Authenticate(\"<EMAIL>\", \"secure_password\") token = handlers.get_token(cmd, uow) # TODO:",
"str, user: model.User) -> bool: return hash(password) == hash(user.password) def encrypt_password(self, password: str)",
"-> str: return hash(password) def generate_token(self, password: str, user: model.User) -> dict: return",
"-> dict: return password def verify_token(self, token: str) -> bool: return token class",
"command = commands.AddAuthorizationToUser(\"id1234\", app_auth_1) handlers.add_app_auth_to_user(command, uow) app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_2)",
"def get_by_email(self, email: str): return next((u for u in self._users if u.email ==",
"= commands.Authenticate(\"<EMAIL>\", \"secure_password\") token = handlers.get_token(cmd, uow) # TODO: Fake token generation def",
"app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_1) handlers.add_app_auth_to_user(command, uow) app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB) command",
"users: List[model.User]): self._users = set(users) self.seen = set() def add(self, user: model.User): self._users.add(user)",
") class TestHandlers(TestCase): def test_create_user_must_create_user(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\",",
"def verify_token(self, token: str) -> bool: return token class FakeUnitOfWork(UnitOfWork): def __init__(self): self.users:",
"\"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP) command = commands.AddAuthorizationToUser(\"id1234\",",
"encrypt_password(self, password: str) -> str: return hash(password) def generate_token(self, password: str, user: model.User)",
"return password def verify_token(self, token: str) -> bool: return token class FakeUnitOfWork(UnitOfWork): def",
"pass def bootstrap_test_app(): return bootstrap.bootstrap( start_orm=False, uow=FakeUnitOfWork(), ) class TestHandlers(TestCase): def test_create_user_must_create_user(self): uow",
"FakeRepository: def __init__(self, users: List[model.User]): self._users = set(users) self.seen = set() def add(self,",
"uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command,",
"def add(self, user: model.User): self._users.add(user) def get(self, id: str): return next((u for u",
"\"secure_password\") token = handlers.get_token(cmd, uow) # TODO: Fake token generation def verify_token_must_return(self): pass",
"bool: return hash(password) == hash(user.password) def encrypt_password(self, password: str) -> str: return hash(password)",
"commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) cmd",
"import bootstrap from app.domain import commands, model from app.service_layer import handlers from app.service_layer.unit_of_work",
"user: model.User) -> bool: return hash(password) == hash(user.password) def encrypt_password(self, password: str) ->",
"password: str) -> str: return hash(password) def generate_token(self, password: str, user: model.User) ->",
"from app.domain import commands, model from app.service_layer import handlers from app.service_layer.unit_of_work import UnitOfWork",
"verify_token(self, token: str) -> bool: return token class FakeUnitOfWork(UnitOfWork): def __init__(self): self.users: AbstractUserRepository",
"uow, lambda *args: None) cmd = commands.Authenticate(\"<EMAIL>\", \"secure_password\") token = handlers.get_token(cmd, uow) #",
"str, user: model.User) -> dict: return password def verify_token(self, token: str) -> bool:",
"for u in self._users if u.email == email), None) def list(self): return self._users",
"UnitOfWork from app.service_layer.auth_service import AuthService class FakeRepository: def __init__(self, users: List[model.User]): self._users =",
"\"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) cmd = commands.Authenticate(\"<EMAIL>\",",
"-> bool: return token class FakeUnitOfWork(UnitOfWork): def __init__(self): self.users: AbstractUserRepository = FakeRepository([]) self.auth_service",
"cmd = commands.Authenticate(\"<EMAIL>\", \"secure_password\") token = handlers.get_token(cmd, uow) # TODO: Fake token generation",
"self._users class FakeAuthService: def verify_password(self, password: str, user: model.User) -> bool: return hash(password)",
") handlers.create_user(command, uow, lambda *args: None) app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_1)",
"app.service_layer.auth_service import AuthService class FakeRepository: def __init__(self, users: List[model.User]): self._users = set(users) self.seen",
"model.AppAuthorization(model.App.TIRELIRE_APP) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_1) handlers.add_app_auth_to_user(command, uow) app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB) command = commands.AddAuthorizationToUser(\"id1234\",",
"\"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP)",
"model.AppAuthorization(model.App.TIRELIRE_WEB) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_2) handlers.add_app_auth_to_user(command, uow) user = uow.users.get('id1234') self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2})",
"None) app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_1) handlers.add_app_auth_to_user(command, uow) app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB)",
"password: str, user: model.User) -> bool: return hash(password) == hash(user.password) def encrypt_password(self, password:",
"self.assertIsNotNone(uow.users.get('id1234')) def test_add_app_auth_to_user_must_return(self): uow = bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\",",
"bootstrap_test_app() command = commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda",
"= commands.CreateUser( \"id1234\", \"secure_password\", \"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None)",
"*args: None) app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP) command = commands.AddAuthorizationToUser(\"id1234\", app_auth_1) handlers.add_app_auth_to_user(command, uow) app_auth_2 =",
"\"john\", \"doe\", \"<EMAIL>\" ) handlers.create_user(command, uow, lambda *args: None) cmd = commands.Authenticate(\"<EMAIL>\", \"secure_password\")",
"import TestCase from typing import List from app import bootstrap from app.domain import",
"from app import bootstrap from app.domain import commands, model from app.service_layer import handlers",
"id), None) def get_by_email(self, email: str): return next((u for u in self._users if"
] |
[
"ImportError: pass def validate_source_file(path): return os.path.isfile(os.path.abspath(path)) def validate_expression(expression): return '<lang>' in expression def",
"'projects' returns the key of the selected choice \"\"\" a = \"\\n\".join([\" {}.",
"first_time: retry_message = \"Hit Enter to try selecting a project again: \" input(utils.color_text(retry_message,",
"first_time = False return l[int(choice) - 1][0] def input_prompt(key, validation_method): user_input = ''",
"1][0] def input_prompt(key, validation_method): user_input = '' first_time = True while not validation_method(user_input):",
"import readline readline.set_completer_delims(' \\t\\n;') readline.parse_and_bind(\"tab: complete\") except ImportError: pass def validate_source_file(path): return os.path.isfile(os.path.abspath(path))",
"formats = self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description']) i18n_type = choice_prompt(formats, 'formats') organizations = self.get_organizations() print(TEXTS['organization']['description']) org_slug",
"in formats.items() if extension in v['file-extensions']] if not formats: raise Exception(messages.TEXTS['formats']['empty']) return sorted(formats,",
"= choice_prompt(formats, 'formats') organizations = self.get_organizations() print(TEXTS['organization']['description']) org_slug = choice_prompt(organizations, 'organization') projects =",
"== 'tx:new_project': print(messages.create_project_instructions.format( host=self.host, org=org_slug )) else: project = [p for p in",
"\"\"\" l: A list of tuples (key, display_value) with the valid choices key:",
"for k, v in formats.items() if extension in v['file-extensions']] if not formats: raise",
"formats.items() if extension in v['file-extensions']] if not formats: raise Exception(messages.TEXTS['formats']['empty']) return sorted(formats, key=lambda",
"False return user_input class Wizard(object): def __init__(self, path_to_tx): p = Project(path_to_tx) self.host =",
"projects = self.get_projects_for_org(org_slug) p_choices = [(p['slug'], p['name']) for p in projects] p_choices.append(create_project) if",
"projects] p_choices.append(create_project) if projects: print(TEXTS['projects']['description']) else: print(\"We found no projects in this organization!\")",
"return l[int(choice) - 1][0] def input_prompt(key, validation_method): user_input = '' first_time = True",
"a + \"\\n\" print(a) choice = '' first_time = True r = '1'",
"not first_time: print(messages.TEXTS[key][\"error\"]) choice = input(utils.color_text( messages.TEXTS[key]['message'].format(r=r), COLOR)) first_time = False return l[int(choice)",
"again: \" input(utils.color_text(retry_message, COLOR)) projects = self.get_projects_for_org(org_slug) p_choices = [(p['slug'], p['name']) for p",
"in projects] p_choices.append(create_project) if projects: print(TEXTS['projects']['description']) else: print(\"We found no projects in this",
"= p.config.get('main', 'host') username, token_or_password = p.getset_host_credentials( self.host, only_token=True) self.api = Api(username=username, password=<PASSWORD>,",
"l[int(choice) - 1][0] def input_prompt(key, validation_method): user_input = '' first_time = True while",
"in this organization!\") first_time = False project_slug = choice_prompt(p_choices, 'projects') if project_slug ==",
"selected choice \"\"\" a = \"\\n\".join([\" {}. {}\".format(i+1, f[1]) for i, f in",
"def validate_int(choice, length): try: choice = int(choice) except ValueError: return False return 0",
"= '1' if len(l) == 1 else '1-{}'.format(len(l)) while not validate_int(choice, len(l)): if",
"to try selecting a project again: \" input(utils.color_text(retry_message, COLOR)) projects = self.get_projects_for_org(org_slug) p_choices",
"while not validation_method(user_input): if not first_time: print(messages.TEXTS[key]['error']) user_input = input( utils.color_text(messages.TEXTS[key]['message'], COLOR)) first_time",
"first_time = True r = '1' if len(l) == 1 else '1-{}'.format(len(l)) while",
"def input_prompt(key, validation_method): user_input = '' first_time = True while not validation_method(user_input): if",
"Exception(messages.TEXTS['formats']['empty']) return sorted(formats, key=lambda x: x[0]) def run(self): \"\"\" Runs the interactive wizard",
"self.host = p.config.get('main', 'host') username, token_or_password = p.getset_host_credentials( self.host, only_token=True) self.api = Api(username=username,",
"only_token=True) self.api = Api(username=username, password=<PASSWORD>, host=self.host, path_to_tx=p.txrc_file) def get_organizations(self): try: organizations = self.api.get('organizations')",
"A list of tuples (key, display_value) with the valid choices key: one of",
"return project list sorted by name return sorted( [p for p in projects",
"Enter to try selecting a project again: \" input(utils.color_text(retry_message, COLOR)) projects = self.get_projects_for_org(org_slug)",
"is run. Returns: the options dictionary. \"\"\" TEXTS = messages.TEXTS print(TEXTS['source_file']['description']) source_file =",
"valid choices key: one of 'formats', 'organizations', 'projects' returns the key of the",
"path_to_tx=p.txrc_file) def get_organizations(self): try: organizations = self.api.get('organizations') except Exception as e: logger.error(e) raise",
"p in projects] p_choices.append(create_project) if projects: print(TEXTS['projects']['description']) else: print(\"We found no projects in",
"try selecting a project again: \" input(utils.color_text(retry_message, COLOR)) projects = self.get_projects_for_org(org_slug) p_choices =",
") expression = input_prompt('expression', validate_expression) formats = self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description']) i18n_type = choice_prompt(formats, 'formats')",
"from txclib.project import Project from txclib.log import logger from six.moves import input COLOR",
"'{} - {}'.format(v['description'], v['file-extensions']) formats = [(k, display_format(v)) for k, v in formats.items()",
"default True when interactive wizard is run. Returns: the options dictionary. \"\"\" TEXTS",
"slugify import slugify from txclib import messages from txclib import utils from txclib.api",
"= False return user_input class Wizard(object): def __init__(self, path_to_tx): p = Project(path_to_tx) self.host",
"input_prompt('source_file', validate_source_file) print( TEXTS['expression']['description'].format(source_file=source_file) ) expression = input_prompt('expression', validate_expression) formats = self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description'])",
"return sorted( [p for p in projects if not p['archived']], key=lambda x: x['name']",
"\"\\n\".join([\" {}. {}\".format(i+1, f[1]) for i, f in enumerate(l)]) a = a +",
"key of the selected choice \"\"\" a = \"\\n\".join([\" {}. {}\".format(i+1, f[1]) for",
"get_organizations(self): try: organizations = self.api.get('organizations') except Exception as e: logger.error(e) raise # return",
"[(o['slug'], o['name']) for o in organizations], key=lambda x: x[1] ) def get_projects_for_org(self, organization):",
"username, token_or_password = p.getset_host_credentials( self.host, only_token=True) self.api = Api(username=username, password=<PASSWORD>, host=self.host, path_to_tx=p.txrc_file) def",
"name return sorted( [(o['slug'], o['name']) for o in organizations], key=lambda x: x[1] )",
"project = None while not project: if not first_time: retry_message = \"Hit Enter",
"p.config.get('main', 'host') username, token_or_password = p.getset_host_credentials( self.host, only_token=True) self.api = Api(username=username, password=<PASSWORD>, host=self.host,",
"len(l) == 1 else '1-{}'.format(len(l)) while not validate_int(choice, len(l)): if not first_time: print(messages.TEXTS[key][\"error\"])",
"organizations], key=lambda x: x[1] ) def get_projects_for_org(self, organization): try: projects = self.api.get('projects', organization=organization)",
"messages from txclib import utils from txclib.api import Api from txclib.project import Project",
"for i, f in enumerate(l)]) a = a + \"\\n\" print(a) choice =",
"choice <= length def choice_prompt(l, key): \"\"\" l: A list of tuples (key,",
"utils.color_text(messages.TEXTS[key]['message'], COLOR)) first_time = False return user_input class Wizard(object): def __init__(self, path_to_tx): p",
"first_time = True create_project = (\"tx:new_project\", \"Create new project (show instructions)...\") project =",
"[p for p in projects if p['slug'] == project_slug][0] source_language = project['source_language']['code'] resource_slug",
"import input COLOR = \"CYAN\" try: import readline readline.set_completer_delims(' \\t\\n;') readline.parse_and_bind(\"tab: complete\") except",
"list sorted by name return sorted( [(o['slug'], o['name']) for o in organizations], key=lambda",
"in projects if not p['archived']], key=lambda x: x['name'] ) def get_formats(self, filename): _,",
"input_prompt(key, validation_method): user_input = '' first_time = True while not validation_method(user_input): if not",
"i18n_type = choice_prompt(formats, 'formats') organizations = self.get_organizations() print(TEXTS['organization']['description']) org_slug = choice_prompt(organizations, 'organization') projects",
"not validation_method(user_input): if not first_time: print(messages.TEXTS[key]['error']) user_input = input( utils.color_text(messages.TEXTS[key]['message'], COLOR)) first_time =",
"a = a + \"\\n\" print(a) choice = '' first_time = True r",
"p_choices = [(p['slug'], p['name']) for p in projects] p_choices.append(create_project) if projects: print(TEXTS['projects']['description']) else:",
"return 0 < choice <= length def choice_prompt(l, key): \"\"\" l: A list",
"\"CYAN\" try: import readline readline.set_completer_delims(' \\t\\n;') readline.parse_and_bind(\"tab: complete\") except ImportError: pass def validate_source_file(path):",
"returns the key of the selected choice \"\"\" a = \"\\n\".join([\" {}. {}\".format(i+1,",
"while not project: if not first_time: retry_message = \"Hit Enter to try selecting",
"run(self): \"\"\" Runs the interactive wizard for `tx set` command and populates the",
"False return 0 < choice <= length def choice_prompt(l, key): \"\"\" l: A",
"validation_method(user_input): if not first_time: print(messages.TEXTS[key]['error']) user_input = input( utils.color_text(messages.TEXTS[key]['message'], COLOR)) first_time = False",
"= Api(username=username, password=<PASSWORD>, host=self.host, path_to_tx=p.txrc_file) def get_organizations(self): try: organizations = self.api.get('organizations') except Exception",
"x: x[1] ) def get_projects_for_org(self, organization): try: projects = self.api.get('projects', organization=organization) except Exception",
"return sorted(formats, key=lambda x: x[0]) def run(self): \"\"\" Runs the interactive wizard for",
"= False return l[int(choice) - 1][0] def input_prompt(key, validation_method): user_input = '' first_time",
"\\t\\n;') readline.parse_and_bind(\"tab: complete\") except ImportError: pass def validate_source_file(path): return os.path.isfile(os.path.abspath(path)) def validate_expression(expression): return",
"Api(username=username, password=<PASSWORD>, host=self.host, path_to_tx=p.txrc_file) def get_organizations(self): try: organizations = self.api.get('organizations') except Exception as",
"def get_projects_for_org(self, organization): try: projects = self.api.get('projects', organization=organization) except Exception as e: logger.error(e)",
"[p for p in projects if not p['archived']], key=lambda x: x['name'] ) def",
"print(\"We found no projects in this organization!\") first_time = False project_slug = choice_prompt(p_choices,",
"not first_time: retry_message = \"Hit Enter to try selecting a project again: \"",
"sorted by name return sorted( [p for p in projects if not p['archived']],",
"if not first_time: print(messages.TEXTS[key][\"error\"]) choice = input(utils.color_text( messages.TEXTS[key]['message'].format(r=r), COLOR)) first_time = False return",
"= True create_project = (\"tx:new_project\", \"Create new project (show instructions)...\") project = None",
"retry_message = \"Hit Enter to try selecting a project again: \" input(utils.color_text(retry_message, COLOR))",
"txclib import messages from txclib import utils from txclib.api import Api from txclib.project",
"COLOR)) first_time = False return user_input class Wizard(object): def __init__(self, path_to_tx): p =",
"if not first_time: retry_message = \"Hit Enter to try selecting a project again:",
"choice = input(utils.color_text( messages.TEXTS[key]['message'].format(r=r), COLOR)) first_time = False return l[int(choice) - 1][0] def",
"projects = self.api.get('projects', organization=organization) except Exception as e: logger.error(e) raise # return project",
"except Exception as e: logger.error(e) raise # return project list sorted by name",
"1 else '1-{}'.format(len(l)) while not validate_int(choice, len(l)): if not first_time: print(messages.TEXTS[key][\"error\"]) choice =",
"True r = '1' if len(l) == 1 else '1-{}'.format(len(l)) while not validate_int(choice,",
"logger.error(e) raise def display_format(v): return '{} - {}'.format(v['description'], v['file-extensions']) formats = [(k, display_format(v))",
"and populates the parser's options with the user input. Options `local` and `execute`",
"Exception as e: logger.error(e) raise # return project list sorted by name return",
"new project (show instructions)...\") project = None while not project: if not first_time:",
"wizard is run. Returns: the options dictionary. \"\"\" TEXTS = messages.TEXTS print(TEXTS['source_file']['description']) source_file",
"else: project = [p for p in projects if p['slug'] == project_slug][0] source_language",
"organization=organization) except Exception as e: logger.error(e) raise # return project list sorted by",
"= False project_slug = choice_prompt(p_choices, 'projects') if project_slug == 'tx:new_project': print(messages.create_project_instructions.format( host=self.host, org=org_slug",
"return user_input class Wizard(object): def __init__(self, path_to_tx): p = Project(path_to_tx) self.host = p.config.get('main',",
"key=lambda x: x['name'] ) def get_formats(self, filename): _, extension = os.path.splitext(filename) try: formats",
"source_file = input_prompt('source_file', validate_source_file) print( TEXTS['expression']['description'].format(source_file=source_file) ) expression = input_prompt('expression', validate_expression) formats =",
"e: logger.error(e) raise # return project list sorted by name return sorted( [p",
"path_to_tx): p = Project(path_to_tx) self.host = p.config.get('main', 'host') username, token_or_password = p.getset_host_credentials( self.host,",
"formats = [(k, display_format(v)) for k, v in formats.items() if extension in v['file-extensions']]",
"project: if not first_time: retry_message = \"Hit Enter to try selecting a project",
"project list sorted by name return sorted( [p for p in projects if",
"import os from slugify import slugify from txclib import messages from txclib import",
"organizations = self.get_organizations() print(TEXTS['organization']['description']) org_slug = choice_prompt(organizations, 'organization') projects = [] first_time =",
"filename): _, extension = os.path.splitext(filename) try: formats = self.api.get('formats') except Exception as e:",
"p in projects if p['slug'] == project_slug][0] source_language = project['source_language']['code'] resource_slug = slugify(os.path.basename(source_file))",
"interactive wizard is run. Returns: the options dictionary. \"\"\" TEXTS = messages.TEXTS print(TEXTS['source_file']['description'])",
"COLOR)) projects = self.get_projects_for_org(org_slug) p_choices = [(p['slug'], p['name']) for p in projects] p_choices.append(create_project)",
"first_time = False project_slug = choice_prompt(p_choices, 'projects') if project_slug == 'tx:new_project': print(messages.create_project_instructions.format( host=self.host,",
"instructions)...\") project = None while not project: if not first_time: retry_message = \"Hit",
"print(messages.create_project_instructions.format( host=self.host, org=org_slug )) else: project = [p for p in projects if",
"choice_prompt(organizations, 'organization') projects = [] first_time = True create_project = (\"tx:new_project\", \"Create new",
"[(k, display_format(v)) for k, v in formats.items() if extension in v['file-extensions']] if not",
"projects = [] first_time = True create_project = (\"tx:new_project\", \"Create new project (show",
"o['name']) for o in organizations], key=lambda x: x[1] ) def get_projects_for_org(self, organization): try:",
"project['source_language']['code'] resource_slug = slugify(os.path.basename(source_file)) resource = '{}.{}'.format(project_slug, resource_slug) options = { 'source_file': source_file,",
"messages.TEXTS[key]['message'].format(r=r), COLOR)) first_time = False return l[int(choice) - 1][0] def input_prompt(key, validation_method): user_input",
"True create_project = (\"tx:new_project\", \"Create new project (show instructions)...\") project = None while",
"= '' first_time = True while not validation_method(user_input): if not first_time: print(messages.TEXTS[key]['error']) user_input",
"if extension in v['file-extensions']] if not formats: raise Exception(messages.TEXTS['formats']['empty']) return sorted(formats, key=lambda x:",
"'{}.{}'.format(project_slug, resource_slug) options = { 'source_file': source_file, 'expression': expression, 'i18n_type': i18n_type, 'source_language': source_language,",
"run. Returns: the options dictionary. \"\"\" TEXTS = messages.TEXTS print(TEXTS['source_file']['description']) source_file = input_prompt('source_file',",
"Project(path_to_tx) self.host = p.config.get('main', 'host') username, token_or_password = p.getset_host_credentials( self.host, only_token=True) self.api =",
"Returns: the options dictionary. \"\"\" TEXTS = messages.TEXTS print(TEXTS['source_file']['description']) source_file = input_prompt('source_file', validate_source_file)",
"resource_slug = slugify(os.path.basename(source_file)) resource = '{}.{}'.format(project_slug, resource_slug) options = { 'source_file': source_file, 'expression':",
"p in projects if not p['archived']], key=lambda x: x['name'] ) def get_formats(self, filename):",
"= \"Hit Enter to try selecting a project again: \" input(utils.color_text(retry_message, COLOR)) projects",
"choice_prompt(p_choices, 'projects') if project_slug == 'tx:new_project': print(messages.create_project_instructions.format( host=self.host, org=org_slug )) else: project =",
"create_project = (\"tx:new_project\", \"Create new project (show instructions)...\") project = None while not",
"project_slug][0] source_language = project['source_language']['code'] resource_slug = slugify(os.path.basename(source_file)) resource = '{}.{}'.format(project_slug, resource_slug) options =",
"input( utils.color_text(messages.TEXTS[key]['message'], COLOR)) first_time = False return user_input class Wizard(object): def __init__(self, path_to_tx):",
"== project_slug][0] source_language = project['source_language']['code'] resource_slug = slugify(os.path.basename(source_file)) resource = '{}.{}'.format(project_slug, resource_slug) options",
"x[0]) def run(self): \"\"\" Runs the interactive wizard for `tx set` command and",
"{}\".format(i+1, f[1]) for i, f in enumerate(l)]) a = a + \"\\n\" print(a)",
"= [(k, display_format(v)) for k, v in formats.items() if extension in v['file-extensions']] if",
"x: x[0]) def run(self): \"\"\" Runs the interactive wizard for `tx set` command",
"= { 'source_file': source_file, 'expression': expression, 'i18n_type': i18n_type, 'source_language': source_language, 'resource': resource, }",
"interactive wizard for `tx set` command and populates the parser's options with the",
"host=self.host, org=org_slug )) else: project = [p for p in projects if p['slug']",
"slugify from txclib import messages from txclib import utils from txclib.api import Api",
"while not validate_int(choice, len(l)): if not first_time: print(messages.TEXTS[key][\"error\"]) choice = input(utils.color_text( messages.TEXTS[key]['message'].format(r=r), COLOR))",
"x['name'] ) def get_formats(self, filename): _, extension = os.path.splitext(filename) try: formats = self.api.get('formats')",
"\"\\n\" print(a) choice = '' first_time = True r = '1' if len(l)",
"= self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description']) i18n_type = choice_prompt(formats, 'formats') organizations = self.get_organizations() print(TEXTS['organization']['description']) org_slug =",
"with the valid choices key: one of 'formats', 'organizations', 'projects' returns the key",
"wizard for `tx set` command and populates the parser's options with the user",
"+ \"\\n\" print(a) choice = '' first_time = True r = '1' if",
"in v['file-extensions']] if not formats: raise Exception(messages.TEXTS['formats']['empty']) return sorted(formats, key=lambda x: x[0]) def",
"[] first_time = True create_project = (\"tx:new_project\", \"Create new project (show instructions)...\") project",
"are by default True when interactive wizard is run. Returns: the options dictionary.",
"return '{} - {}'.format(v['description'], v['file-extensions']) formats = [(k, display_format(v)) for k, v in",
")) else: project = [p for p in projects if p['slug'] == project_slug][0]",
"if not first_time: print(messages.TEXTS[key]['error']) user_input = input( utils.color_text(messages.TEXTS[key]['message'], COLOR)) first_time = False return",
"def validate_expression(expression): return '<lang>' in expression def validate_int(choice, length): try: choice = int(choice)",
"this organization!\") first_time = False project_slug = choice_prompt(p_choices, 'projects') if project_slug == 'tx:new_project':",
"os.path.isfile(os.path.abspath(path)) def validate_expression(expression): return '<lang>' in expression def validate_int(choice, length): try: choice =",
"p = Project(path_to_tx) self.host = p.config.get('main', 'host') username, token_or_password = p.getset_host_credentials( self.host, only_token=True)",
"one of 'formats', 'organizations', 'projects' returns the key of the selected choice \"\"\"",
"= os.path.splitext(filename) try: formats = self.api.get('formats') except Exception as e: logger.error(e) raise def",
"a project again: \" input(utils.color_text(retry_message, COLOR)) projects = self.get_projects_for_org(org_slug) p_choices = [(p['slug'], p['name'])",
"COLOR = \"CYAN\" try: import readline readline.set_completer_delims(' \\t\\n;') readline.parse_and_bind(\"tab: complete\") except ImportError: pass",
"`tx set` command and populates the parser's options with the user input. Options",
"def get_organizations(self): try: organizations = self.api.get('organizations') except Exception as e: logger.error(e) raise #",
"<= length def choice_prompt(l, key): \"\"\" l: A list of tuples (key, display_value)",
"a = \"\\n\".join([\" {}. {}\".format(i+1, f[1]) for i, f in enumerate(l)]) a =",
"in projects if p['slug'] == project_slug][0] source_language = project['source_language']['code'] resource_slug = slugify(os.path.basename(source_file)) resource",
"= project['source_language']['code'] resource_slug = slugify(os.path.basename(source_file)) resource = '{}.{}'.format(project_slug, resource_slug) options = { 'source_file':",
"resource_slug) options = { 'source_file': source_file, 'expression': expression, 'i18n_type': i18n_type, 'source_language': source_language, 'resource':",
"messages.TEXTS print(TEXTS['source_file']['description']) source_file = input_prompt('source_file', validate_source_file) print( TEXTS['expression']['description'].format(source_file=source_file) ) expression = input_prompt('expression', validate_expression)",
"self.api.get('organizations') except Exception as e: logger.error(e) raise # return org list sorted by",
"e: logger.error(e) raise def display_format(v): return '{} - {}'.format(v['description'], v['file-extensions']) formats = [(k,",
"class Wizard(object): def __init__(self, path_to_tx): p = Project(path_to_tx) self.host = p.config.get('main', 'host') username,",
"of tuples (key, display_value) with the valid choices key: one of 'formats', 'organizations',",
"raise Exception(messages.TEXTS['formats']['empty']) return sorted(formats, key=lambda x: x[0]) def run(self): \"\"\" Runs the interactive",
"'' first_time = True r = '1' if len(l) == 1 else '1-{}'.format(len(l))",
"user input. Options `local` and `execute` are by default True when interactive wizard",
"organizations = self.api.get('organizations') except Exception as e: logger.error(e) raise # return org list",
"first_time: print(messages.TEXTS[key][\"error\"]) choice = input(utils.color_text( messages.TEXTS[key]['message'].format(r=r), COLOR)) first_time = False return l[int(choice) -",
"six.moves import input COLOR = \"CYAN\" try: import readline readline.set_completer_delims(' \\t\\n;') readline.parse_and_bind(\"tab: complete\")",
"'' first_time = True while not validation_method(user_input): if not first_time: print(messages.TEXTS[key]['error']) user_input =",
"not project: if not first_time: retry_message = \"Hit Enter to try selecting a",
"'source_file': source_file, 'expression': expression, 'i18n_type': i18n_type, 'source_language': source_language, 'resource': resource, } return options",
"_, extension = os.path.splitext(filename) try: formats = self.api.get('formats') except Exception as e: logger.error(e)",
"if projects: print(TEXTS['projects']['description']) else: print(\"We found no projects in this organization!\") first_time =",
"p_choices.append(create_project) if projects: print(TEXTS['projects']['description']) else: print(\"We found no projects in this organization!\") first_time",
"except Exception as e: logger.error(e) raise # return org list sorted by name",
"dictionary. \"\"\" TEXTS = messages.TEXTS print(TEXTS['source_file']['description']) source_file = input_prompt('source_file', validate_source_file) print( TEXTS['expression']['description'].format(source_file=source_file) )",
"= \"\\n\".join([\" {}. {}\".format(i+1, f[1]) for i, f in enumerate(l)]) a = a",
"validate_source_file(path): return os.path.isfile(os.path.abspath(path)) def validate_expression(expression): return '<lang>' in expression def validate_int(choice, length): try:",
"def validate_source_file(path): return os.path.isfile(os.path.abspath(path)) def validate_expression(expression): return '<lang>' in expression def validate_int(choice, length):",
"= slugify(os.path.basename(source_file)) resource = '{}.{}'.format(project_slug, resource_slug) options = { 'source_file': source_file, 'expression': expression,",
"print(messages.TEXTS[key]['error']) user_input = input( utils.color_text(messages.TEXTS[key]['message'], COLOR)) first_time = False return user_input class Wizard(object):",
"validate_int(choice, len(l)): if not first_time: print(messages.TEXTS[key][\"error\"]) choice = input(utils.color_text( messages.TEXTS[key]['message'].format(r=r), COLOR)) first_time =",
") def get_projects_for_org(self, organization): try: projects = self.api.get('projects', organization=organization) except Exception as e:",
"first_time: print(messages.TEXTS[key]['error']) user_input = input( utils.color_text(messages.TEXTS[key]['message'], COLOR)) first_time = False return user_input class",
"not first_time: print(messages.TEXTS[key]['error']) user_input = input( utils.color_text(messages.TEXTS[key]['message'], COLOR)) first_time = False return user_input",
"org=org_slug )) else: project = [p for p in projects if p['slug'] ==",
"= '' first_time = True r = '1' if len(l) == 1 else",
"extension in v['file-extensions']] if not formats: raise Exception(messages.TEXTS['formats']['empty']) return sorted(formats, key=lambda x: x[0])",
"print(a) choice = '' first_time = True r = '1' if len(l) ==",
"self.api.get('projects', organization=organization) except Exception as e: logger.error(e) raise # return project list sorted",
"print(TEXTS['source_file']['description']) source_file = input_prompt('source_file', validate_source_file) print( TEXTS['expression']['description'].format(source_file=source_file) ) expression = input_prompt('expression', validate_expression) formats",
"(\"tx:new_project\", \"Create new project (show instructions)...\") project = None while not project: if",
"except ImportError: pass def validate_source_file(path): return os.path.isfile(os.path.abspath(path)) def validate_expression(expression): return '<lang>' in expression",
"o in organizations], key=lambda x: x[1] ) def get_projects_for_org(self, organization): try: projects =",
"project_slug = choice_prompt(p_choices, 'projects') if project_slug == 'tx:new_project': print(messages.create_project_instructions.format( host=self.host, org=org_slug )) else:",
"= self.api.get('formats') except Exception as e: logger.error(e) raise def display_format(v): return '{} -",
"display_format(v): return '{} - {}'.format(v['description'], v['file-extensions']) formats = [(k, display_format(v)) for k, v",
"return sorted( [(o['slug'], o['name']) for o in organizations], key=lambda x: x[1] ) def",
"formats = self.api.get('formats') except Exception as e: logger.error(e) raise def display_format(v): return '{}",
"= choice_prompt(p_choices, 'projects') if project_slug == 'tx:new_project': print(messages.create_project_instructions.format( host=self.host, org=org_slug )) else: project",
"the selected choice \"\"\" a = \"\\n\".join([\" {}. {}\".format(i+1, f[1]) for i, f",
"the valid choices key: one of 'formats', 'organizations', 'projects' returns the key of",
"expression def validate_int(choice, length): try: choice = int(choice) except ValueError: return False return",
"extension = os.path.splitext(filename) try: formats = self.api.get('formats') except Exception as e: logger.error(e) raise",
"key=lambda x: x[1] ) def get_projects_for_org(self, organization): try: projects = self.api.get('projects', organization=organization) except",
"utils from txclib.api import Api from txclib.project import Project from txclib.log import logger",
"logger.error(e) raise # return org list sorted by name return sorted( [(o['slug'], o['name'])",
"validate_expression) formats = self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description']) i18n_type = choice_prompt(formats, 'formats') organizations = self.get_organizations() print(TEXTS['organization']['description'])",
"- {}'.format(v['description'], v['file-extensions']) formats = [(k, display_format(v)) for k, v in formats.items() if",
"project (show instructions)...\") project = None while not project: if not first_time: retry_message",
"v['file-extensions']) formats = [(k, display_format(v)) for k, v in formats.items() if extension in",
"self.api = Api(username=username, password=<PASSWORD>, host=self.host, path_to_tx=p.txrc_file) def get_organizations(self): try: organizations = self.api.get('organizations') except",
"self.get_projects_for_org(org_slug) p_choices = [(p['slug'], p['name']) for p in projects] p_choices.append(create_project) if projects: print(TEXTS['projects']['description'])",
"TEXTS['expression']['description'].format(source_file=source_file) ) expression = input_prompt('expression', validate_expression) formats = self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description']) i18n_type = choice_prompt(formats,",
"\"Hit Enter to try selecting a project again: \" input(utils.color_text(retry_message, COLOR)) projects =",
"org list sorted by name return sorted( [(o['slug'], o['name']) for o in organizations],",
"else '1-{}'.format(len(l)) while not validate_int(choice, len(l)): if not first_time: print(messages.TEXTS[key][\"error\"]) choice = input(utils.color_text(",
"def run(self): \"\"\" Runs the interactive wizard for `tx set` command and populates",
"p['name']) for p in projects] p_choices.append(create_project) if projects: print(TEXTS['projects']['description']) else: print(\"We found no",
"length def choice_prompt(l, key): \"\"\" l: A list of tuples (key, display_value) with",
"projects in this organization!\") first_time = False project_slug = choice_prompt(p_choices, 'projects') if project_slug",
"return '<lang>' in expression def validate_int(choice, length): try: choice = int(choice) except ValueError:",
"import Project from txclib.log import logger from six.moves import input COLOR = \"CYAN\"",
"\"\"\" TEXTS = messages.TEXTS print(TEXTS['source_file']['description']) source_file = input_prompt('source_file', validate_source_file) print( TEXTS['expression']['description'].format(source_file=source_file) ) expression",
"no projects in this organization!\") first_time = False project_slug = choice_prompt(p_choices, 'projects') if",
"x: x['name'] ) def get_formats(self, filename): _, extension = os.path.splitext(filename) try: formats =",
"from txclib.api import Api from txclib.project import Project from txclib.log import logger from",
"key): \"\"\" l: A list of tuples (key, display_value) with the valid choices",
"except Exception as e: logger.error(e) raise def display_format(v): return '{} - {}'.format(v['description'], v['file-extensions'])",
"from six.moves import input COLOR = \"CYAN\" try: import readline readline.set_completer_delims(' \\t\\n;') readline.parse_and_bind(\"tab:",
"display_format(v)) for k, v in formats.items() if extension in v['file-extensions']] if not formats:",
"of the selected choice \"\"\" a = \"\\n\".join([\" {}. {}\".format(i+1, f[1]) for i,",
"as e: logger.error(e) raise def display_format(v): return '{} - {}'.format(v['description'], v['file-extensions']) formats =",
"= None while not project: if not first_time: retry_message = \"Hit Enter to",
"enumerate(l)]) a = a + \"\\n\" print(a) choice = '' first_time = True",
"the parser's options with the user input. Options `local` and `execute` are by",
"command and populates the parser's options with the user input. Options `local` and",
"as e: logger.error(e) raise # return org list sorted by name return sorted(",
"slugify(os.path.basename(source_file)) resource = '{}.{}'.format(project_slug, resource_slug) options = { 'source_file': source_file, 'expression': expression, 'i18n_type':",
"= [] first_time = True create_project = (\"tx:new_project\", \"Create new project (show instructions)...\")",
"host=self.host, path_to_tx=p.txrc_file) def get_organizations(self): try: organizations = self.api.get('organizations') except Exception as e: logger.error(e)",
"= input(utils.color_text( messages.TEXTS[key]['message'].format(r=r), COLOR)) first_time = False return l[int(choice) - 1][0] def input_prompt(key,",
"print(TEXTS['projects']['description']) else: print(\"We found no projects in this organization!\") first_time = False project_slug",
"logger.error(e) raise # return project list sorted by name return sorted( [p for",
"first_time = False return user_input class Wizard(object): def __init__(self, path_to_tx): p = Project(path_to_tx)",
"user_input = '' first_time = True while not validation_method(user_input): if not first_time: print(messages.TEXTS[key]['error'])",
"= (\"tx:new_project\", \"Create new project (show instructions)...\") project = None while not project:",
"None while not project: if not first_time: retry_message = \"Hit Enter to try",
"try: formats = self.api.get('formats') except Exception as e: logger.error(e) raise def display_format(v): return",
"input(utils.color_text( messages.TEXTS[key]['message'].format(r=r), COLOR)) first_time = False return l[int(choice) - 1][0] def input_prompt(key, validation_method):",
"p.getset_host_credentials( self.host, only_token=True) self.api = Api(username=username, password=<PASSWORD>, host=self.host, path_to_tx=p.txrc_file) def get_organizations(self): try: organizations",
"self.host, only_token=True) self.api = Api(username=username, password=<PASSWORD>, host=self.host, path_to_tx=p.txrc_file) def get_organizations(self): try: organizations =",
"readline.set_completer_delims(' \\t\\n;') readline.parse_and_bind(\"tab: complete\") except ImportError: pass def validate_source_file(path): return os.path.isfile(os.path.abspath(path)) def validate_expression(expression):",
"validate_source_file) print( TEXTS['expression']['description'].format(source_file=source_file) ) expression = input_prompt('expression', validate_expression) formats = self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description']) i18n_type",
"return org list sorted by name return sorted( [(o['slug'], o['name']) for o in",
"< choice <= length def choice_prompt(l, key): \"\"\" l: A list of tuples",
"options dictionary. \"\"\" TEXTS = messages.TEXTS print(TEXTS['source_file']['description']) source_file = input_prompt('source_file', validate_source_file) print( TEXTS['expression']['description'].format(source_file=source_file)",
"= input_prompt('expression', validate_expression) formats = self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description']) i18n_type = choice_prompt(formats, 'formats') organizations =",
"\"\"\" Runs the interactive wizard for `tx set` command and populates the parser's",
"Project from txclib.log import logger from six.moves import input COLOR = \"CYAN\" try:",
"by name return sorted( [p for p in projects if not p['archived']], key=lambda",
"choice \"\"\" a = \"\\n\".join([\" {}. {}\".format(i+1, f[1]) for i, f in enumerate(l)])",
"try: projects = self.api.get('projects', organization=organization) except Exception as e: logger.error(e) raise # return",
"Api from txclib.project import Project from txclib.log import logger from six.moves import input",
"= True while not validation_method(user_input): if not first_time: print(messages.TEXTS[key]['error']) user_input = input( utils.color_text(messages.TEXTS[key]['message'],",
"sorted( [p for p in projects if not p['archived']], key=lambda x: x['name'] )",
"- 1][0] def input_prompt(key, validation_method): user_input = '' first_time = True while not",
"p['archived']], key=lambda x: x['name'] ) def get_formats(self, filename): _, extension = os.path.splitext(filename) try:",
"os from slugify import slugify from txclib import messages from txclib import utils",
"from txclib import messages from txclib import utils from txclib.api import Api from",
"`local` and `execute` are by default True when interactive wizard is run. Returns:",
"user_input = input( utils.color_text(messages.TEXTS[key]['message'], COLOR)) first_time = False return user_input class Wizard(object): def",
"print( TEXTS['expression']['description'].format(source_file=source_file) ) expression = input_prompt('expression', validate_expression) formats = self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description']) i18n_type =",
"{}'.format(v['description'], v['file-extensions']) formats = [(k, display_format(v)) for k, v in formats.items() if extension",
"\"\"\" a = \"\\n\".join([\" {}. {}\".format(i+1, f[1]) for i, f in enumerate(l)]) a",
"with the user input. Options `local` and `execute` are by default True when",
"options with the user input. Options `local` and `execute` are by default True",
"= '{}.{}'.format(project_slug, resource_slug) options = { 'source_file': source_file, 'expression': expression, 'i18n_type': i18n_type, 'source_language':",
"in enumerate(l)]) a = a + \"\\n\" print(a) choice = '' first_time =",
"project = [p for p in projects if p['slug'] == project_slug][0] source_language =",
"False return l[int(choice) - 1][0] def input_prompt(key, validation_method): user_input = '' first_time =",
"input COLOR = \"CYAN\" try: import readline readline.set_completer_delims(' \\t\\n;') readline.parse_and_bind(\"tab: complete\") except ImportError:",
"organization): try: projects = self.api.get('projects', organization=organization) except Exception as e: logger.error(e) raise #",
"TEXTS = messages.TEXTS print(TEXTS['source_file']['description']) source_file = input_prompt('source_file', validate_source_file) print( TEXTS['expression']['description'].format(source_file=source_file) ) expression =",
"from slugify import slugify from txclib import messages from txclib import utils from",
"from txclib import utils from txclib.api import Api from txclib.project import Project from",
"0 < choice <= length def choice_prompt(l, key): \"\"\" l: A list of",
"when interactive wizard is run. Returns: the options dictionary. \"\"\" TEXTS = messages.TEXTS",
"organization!\") first_time = False project_slug = choice_prompt(p_choices, 'projects') if project_slug == 'tx:new_project': print(messages.create_project_instructions.format(",
"Wizard(object): def __init__(self, path_to_tx): p = Project(path_to_tx) self.host = p.config.get('main', 'host') username, token_or_password",
"Options `local` and `execute` are by default True when interactive wizard is run.",
"import slugify from txclib import messages from txclib import utils from txclib.api import",
"the options dictionary. \"\"\" TEXTS = messages.TEXTS print(TEXTS['source_file']['description']) source_file = input_prompt('source_file', validate_source_file) print(",
"__init__(self, path_to_tx): p = Project(path_to_tx) self.host = p.config.get('main', 'host') username, token_or_password = p.getset_host_credentials(",
"print(TEXTS['formats']['description']) i18n_type = choice_prompt(formats, 'formats') organizations = self.get_organizations() print(TEXTS['organization']['description']) org_slug = choice_prompt(organizations, 'organization')",
"expression = input_prompt('expression', validate_expression) formats = self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description']) i18n_type = choice_prompt(formats, 'formats') organizations",
"'formats') organizations = self.get_organizations() print(TEXTS['organization']['description']) org_slug = choice_prompt(organizations, 'organization') projects = [] first_time",
"complete\") except ImportError: pass def validate_source_file(path): return os.path.isfile(os.path.abspath(path)) def validate_expression(expression): return '<lang>' in",
"choice = '' first_time = True r = '1' if len(l) == 1",
"import logger from six.moves import input COLOR = \"CYAN\" try: import readline readline.set_completer_delims('",
"project again: \" input(utils.color_text(retry_message, COLOR)) projects = self.get_projects_for_org(org_slug) p_choices = [(p['slug'], p['name']) for",
"populates the parser's options with the user input. Options `local` and `execute` are",
"choices key: one of 'formats', 'organizations', 'projects' returns the key of the selected",
"x[1] ) def get_projects_for_org(self, organization): try: projects = self.api.get('projects', organization=organization) except Exception as",
"input(utils.color_text(retry_message, COLOR)) projects = self.get_projects_for_org(org_slug) p_choices = [(p['slug'], p['name']) for p in projects]",
"# return project list sorted by name return sorted( [p for p in",
"sorted( [(o['slug'], o['name']) for o in organizations], key=lambda x: x[1] ) def get_projects_for_org(self,",
"= True r = '1' if len(l) == 1 else '1-{}'.format(len(l)) while not",
"import utils from txclib.api import Api from txclib.project import Project from txclib.log import",
"try: organizations = self.api.get('organizations') except Exception as e: logger.error(e) raise # return org",
"= a + \"\\n\" print(a) choice = '' first_time = True r =",
"{}. {}\".format(i+1, f[1]) for i, f in enumerate(l)]) a = a + \"\\n\"",
"# return org list sorted by name return sorted( [(o['slug'], o['name']) for o",
"by name return sorted( [(o['slug'], o['name']) for o in organizations], key=lambda x: x[1]",
"not formats: raise Exception(messages.TEXTS['formats']['empty']) return sorted(formats, key=lambda x: x[0]) def run(self): \"\"\" Runs",
"raise def display_format(v): return '{} - {}'.format(v['description'], v['file-extensions']) formats = [(k, display_format(v)) for",
"token_or_password = p.getset_host_credentials( self.host, only_token=True) self.api = Api(username=username, password=<PASSWORD>, host=self.host, path_to_tx=p.txrc_file) def get_organizations(self):",
"display_value) with the valid choices key: one of 'formats', 'organizations', 'projects' returns the",
"txclib import utils from txclib.api import Api from txclib.project import Project from txclib.log",
"= p.getset_host_credentials( self.host, only_token=True) self.api = Api(username=username, password=<PASSWORD>, host=self.host, path_to_tx=p.txrc_file) def get_organizations(self): try:",
"readline.parse_and_bind(\"tab: complete\") except ImportError: pass def validate_source_file(path): return os.path.isfile(os.path.abspath(path)) def validate_expression(expression): return '<lang>'",
"org_slug = choice_prompt(organizations, 'organization') projects = [] first_time = True create_project = (\"tx:new_project\",",
"for o in organizations], key=lambda x: x[1] ) def get_projects_for_org(self, organization): try: projects",
"pass def validate_source_file(path): return os.path.isfile(os.path.abspath(path)) def validate_expression(expression): return '<lang>' in expression def validate_int(choice,",
"of 'formats', 'organizations', 'projects' returns the key of the selected choice \"\"\" a",
"= messages.TEXTS print(TEXTS['source_file']['description']) source_file = input_prompt('source_file', validate_source_file) print( TEXTS['expression']['description'].format(source_file=source_file) ) expression = input_prompt('expression',",
"== 1 else '1-{}'.format(len(l)) while not validate_int(choice, len(l)): if not first_time: print(messages.TEXTS[key][\"error\"]) choice",
"= int(choice) except ValueError: return False return 0 < choice <= length def",
"raise # return project list sorted by name return sorted( [p for p",
"def display_format(v): return '{} - {}'.format(v['description'], v['file-extensions']) formats = [(k, display_format(v)) for k,",
"not validate_int(choice, len(l)): if not first_time: print(messages.TEXTS[key][\"error\"]) choice = input(utils.color_text( messages.TEXTS[key]['message'].format(r=r), COLOR)) first_time",
"self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description']) i18n_type = choice_prompt(formats, 'formats') organizations = self.get_organizations() print(TEXTS['organization']['description']) org_slug = choice_prompt(organizations,",
"sorted(formats, key=lambda x: x[0]) def run(self): \"\"\" Runs the interactive wizard for `tx",
"len(l)): if not first_time: print(messages.TEXTS[key][\"error\"]) choice = input(utils.color_text( messages.TEXTS[key]['message'].format(r=r), COLOR)) first_time = False",
"get_projects_for_org(self, organization): try: projects = self.api.get('projects', organization=organization) except Exception as e: logger.error(e) raise",
"try: import readline readline.set_completer_delims(' \\t\\n;') readline.parse_and_bind(\"tab: complete\") except ImportError: pass def validate_source_file(path): return",
"'formats', 'organizations', 'projects' returns the key of the selected choice \"\"\" a =",
"l: A list of tuples (key, display_value) with the valid choices key: one",
"for p in projects if p['slug'] == project_slug][0] source_language = project['source_language']['code'] resource_slug =",
"key: one of 'formats', 'organizations', 'projects' returns the key of the selected choice",
"= choice_prompt(organizations, 'organization') projects = [] first_time = True create_project = (\"tx:new_project\", \"Create",
"user_input class Wizard(object): def __init__(self, path_to_tx): p = Project(path_to_tx) self.host = p.config.get('main', 'host')",
"password=<PASSWORD>, host=self.host, path_to_tx=p.txrc_file) def get_organizations(self): try: organizations = self.api.get('organizations') except Exception as e:",
") def get_formats(self, filename): _, extension = os.path.splitext(filename) try: formats = self.api.get('formats') except",
"= self.api.get('organizations') except Exception as e: logger.error(e) raise # return org list sorted",
"tuples (key, display_value) with the valid choices key: one of 'formats', 'organizations', 'projects'",
"= self.get_projects_for_org(org_slug) p_choices = [(p['slug'], p['name']) for p in projects] p_choices.append(create_project) if projects:",
"length): try: choice = int(choice) except ValueError: return False return 0 < choice",
"get_formats(self, filename): _, extension = os.path.splitext(filename) try: formats = self.api.get('formats') except Exception as",
"raise # return org list sorted by name return sorted( [(o['slug'], o['name']) for",
"COLOR)) first_time = False return l[int(choice) - 1][0] def input_prompt(key, validation_method): user_input =",
"'1-{}'.format(len(l)) while not validate_int(choice, len(l)): if not first_time: print(messages.TEXTS[key][\"error\"]) choice = input(utils.color_text( messages.TEXTS[key]['message'].format(r=r),",
"'organizations', 'projects' returns the key of the selected choice \"\"\" a = \"\\n\".join([\"",
"the interactive wizard for `tx set` command and populates the parser's options with",
"choice = int(choice) except ValueError: return False return 0 < choice <= length",
"formats: raise Exception(messages.TEXTS['formats']['empty']) return sorted(formats, key=lambda x: x[0]) def run(self): \"\"\" Runs the",
"choice_prompt(formats, 'formats') organizations = self.get_organizations() print(TEXTS['organization']['description']) org_slug = choice_prompt(organizations, 'organization') projects = []",
"in organizations], key=lambda x: x[1] ) def get_projects_for_org(self, organization): try: projects = self.api.get('projects',",
"[(p['slug'], p['name']) for p in projects] p_choices.append(create_project) if projects: print(TEXTS['projects']['description']) else: print(\"We found",
"Exception as e: logger.error(e) raise def display_format(v): return '{} - {}'.format(v['description'], v['file-extensions']) formats",
"def get_formats(self, filename): _, extension = os.path.splitext(filename) try: formats = self.api.get('formats') except Exception",
"if not formats: raise Exception(messages.TEXTS['formats']['empty']) return sorted(formats, key=lambda x: x[0]) def run(self): \"\"\"",
"= self.get_organizations() print(TEXTS['organization']['description']) org_slug = choice_prompt(organizations, 'organization') projects = [] first_time = True",
"choice_prompt(l, key): \"\"\" l: A list of tuples (key, display_value) with the valid",
"key=lambda x: x[0]) def run(self): \"\"\" Runs the interactive wizard for `tx set`",
"for `tx set` command and populates the parser's options with the user input.",
"self.api.get('formats') except Exception as e: logger.error(e) raise def display_format(v): return '{} - {}'.format(v['description'],",
"list of tuples (key, display_value) with the valid choices key: one of 'formats',",
"if not p['archived']], key=lambda x: x['name'] ) def get_formats(self, filename): _, extension =",
"first_time = True while not validation_method(user_input): if not first_time: print(messages.TEXTS[key]['error']) user_input = input(",
"v['file-extensions']] if not formats: raise Exception(messages.TEXTS['formats']['empty']) return sorted(formats, key=lambda x: x[0]) def run(self):",
"'<lang>' in expression def validate_int(choice, length): try: choice = int(choice) except ValueError: return",
"True while not validation_method(user_input): if not first_time: print(messages.TEXTS[key]['error']) user_input = input( utils.color_text(messages.TEXTS[key]['message'], COLOR))",
"else: print(\"We found no projects in this organization!\") first_time = False project_slug =",
"for p in projects if not p['archived']], key=lambda x: x['name'] ) def get_formats(self,",
"the key of the selected choice \"\"\" a = \"\\n\".join([\" {}. {}\".format(i+1, f[1])",
"= Project(path_to_tx) self.host = p.config.get('main', 'host') username, token_or_password = p.getset_host_credentials( self.host, only_token=True) self.api",
"{ 'source_file': source_file, 'expression': expression, 'i18n_type': i18n_type, 'source_language': source_language, 'resource': resource, } return",
"return False return 0 < choice <= length def choice_prompt(l, key): \"\"\" l:",
"not p['archived']], key=lambda x: x['name'] ) def get_formats(self, filename): _, extension = os.path.splitext(filename)",
"ValueError: return False return 0 < choice <= length def choice_prompt(l, key): \"\"\"",
"validation_method): user_input = '' first_time = True while not validation_method(user_input): if not first_time:",
"list sorted by name return sorted( [p for p in projects if not",
"txclib.project import Project from txclib.log import logger from six.moves import input COLOR =",
"v in formats.items() if extension in v['file-extensions']] if not formats: raise Exception(messages.TEXTS['formats']['empty']) return",
"f[1]) for i, f in enumerate(l)]) a = a + \"\\n\" print(a) choice",
"resource = '{}.{}'.format(project_slug, resource_slug) options = { 'source_file': source_file, 'expression': expression, 'i18n_type': i18n_type,",
"from txclib.log import logger from six.moves import input COLOR = \"CYAN\" try: import",
"'projects') if project_slug == 'tx:new_project': print(messages.create_project_instructions.format( host=self.host, org=org_slug )) else: project = [p",
"for p in projects] p_choices.append(create_project) if projects: print(TEXTS['projects']['description']) else: print(\"We found no projects",
"try: choice = int(choice) except ValueError: return False return 0 < choice <=",
"and `execute` are by default True when interactive wizard is run. Returns: the",
"txclib.log import logger from six.moves import input COLOR = \"CYAN\" try: import readline",
"if project_slug == 'tx:new_project': print(messages.create_project_instructions.format( host=self.host, org=org_slug )) else: project = [p for",
"logger from six.moves import input COLOR = \"CYAN\" try: import readline readline.set_completer_delims(' \\t\\n;')",
"set` command and populates the parser's options with the user input. Options `local`",
"print(TEXTS['organization']['description']) org_slug = choice_prompt(organizations, 'organization') projects = [] first_time = True create_project =",
"source_language = project['source_language']['code'] resource_slug = slugify(os.path.basename(source_file)) resource = '{}.{}'.format(project_slug, resource_slug) options = {",
"= input_prompt('source_file', validate_source_file) print( TEXTS['expression']['description'].format(source_file=source_file) ) expression = input_prompt('expression', validate_expression) formats = self.get_formats(os.path.basename(source_file))",
"= [p for p in projects if p['slug'] == project_slug][0] source_language = project['source_language']['code']",
"selecting a project again: \" input(utils.color_text(retry_message, COLOR)) projects = self.get_projects_for_org(org_slug) p_choices = [(p['slug'],",
"f in enumerate(l)]) a = a + \"\\n\" print(a) choice = '' first_time",
"'organization') projects = [] first_time = True create_project = (\"tx:new_project\", \"Create new project",
"readline readline.set_completer_delims(' \\t\\n;') readline.parse_and_bind(\"tab: complete\") except ImportError: pass def validate_source_file(path): return os.path.isfile(os.path.abspath(path)) def",
"(show instructions)...\") project = None while not project: if not first_time: retry_message =",
"\"Create new project (show instructions)...\") project = None while not project: if not",
"i, f in enumerate(l)]) a = a + \"\\n\" print(a) choice = ''",
"sorted by name return sorted( [(o['slug'], o['name']) for o in organizations], key=lambda x:",
"validate_expression(expression): return '<lang>' in expression def validate_int(choice, length): try: choice = int(choice) except",
"Runs the interactive wizard for `tx set` command and populates the parser's options",
"if len(l) == 1 else '1-{}'.format(len(l)) while not validate_int(choice, len(l)): if not first_time:",
"'1' if len(l) == 1 else '1-{}'.format(len(l)) while not validate_int(choice, len(l)): if not",
"Exception as e: logger.error(e) raise # return org list sorted by name return",
"txclib.api import Api from txclib.project import Project from txclib.log import logger from six.moves",
"as e: logger.error(e) raise # return project list sorted by name return sorted(",
"the user input. Options `local` and `execute` are by default True when interactive",
"in expression def validate_int(choice, length): try: choice = int(choice) except ValueError: return False",
"input_prompt('expression', validate_expression) formats = self.get_formats(os.path.basename(source_file)) print(TEXTS['formats']['description']) i18n_type = choice_prompt(formats, 'formats') organizations = self.get_organizations()",
"if p['slug'] == project_slug][0] source_language = project['source_language']['code'] resource_slug = slugify(os.path.basename(source_file)) resource = '{}.{}'.format(project_slug,",
"r = '1' if len(l) == 1 else '1-{}'.format(len(l)) while not validate_int(choice, len(l)):",
"projects if not p['archived']], key=lambda x: x['name'] ) def get_formats(self, filename): _, extension",
"return os.path.isfile(os.path.abspath(path)) def validate_expression(expression): return '<lang>' in expression def validate_int(choice, length): try: choice",
"name return sorted( [p for p in projects if not p['archived']], key=lambda x:",
"projects if p['slug'] == project_slug][0] source_language = project['source_language']['code'] resource_slug = slugify(os.path.basename(source_file)) resource =",
"= [(p['slug'], p['name']) for p in projects] p_choices.append(create_project) if projects: print(TEXTS['projects']['description']) else: print(\"We",
"except ValueError: return False return 0 < choice <= length def choice_prompt(l, key):",
"def __init__(self, path_to_tx): p = Project(path_to_tx) self.host = p.config.get('main', 'host') username, token_or_password =",
"import messages from txclib import utils from txclib.api import Api from txclib.project import",
"(key, display_value) with the valid choices key: one of 'formats', 'organizations', 'projects' returns",
"= input( utils.color_text(messages.TEXTS[key]['message'], COLOR)) first_time = False return user_input class Wizard(object): def __init__(self,",
"print(messages.TEXTS[key][\"error\"]) choice = input(utils.color_text( messages.TEXTS[key]['message'].format(r=r), COLOR)) first_time = False return l[int(choice) - 1][0]",
"False project_slug = choice_prompt(p_choices, 'projects') if project_slug == 'tx:new_project': print(messages.create_project_instructions.format( host=self.host, org=org_slug ))",
"input. Options `local` and `execute` are by default True when interactive wizard is",
"self.get_organizations() print(TEXTS['organization']['description']) org_slug = choice_prompt(organizations, 'organization') projects = [] first_time = True create_project",
"def choice_prompt(l, key): \"\"\" l: A list of tuples (key, display_value) with the",
"'tx:new_project': print(messages.create_project_instructions.format( host=self.host, org=org_slug )) else: project = [p for p in projects",
"\" input(utils.color_text(retry_message, COLOR)) projects = self.get_projects_for_org(org_slug) p_choices = [(p['slug'], p['name']) for p in",
"= self.api.get('projects', organization=organization) except Exception as e: logger.error(e) raise # return project list",
"by default True when interactive wizard is run. Returns: the options dictionary. \"\"\"",
"projects: print(TEXTS['projects']['description']) else: print(\"We found no projects in this organization!\") first_time = False",
"`execute` are by default True when interactive wizard is run. Returns: the options",
"import Api from txclib.project import Project from txclib.log import logger from six.moves import",
"int(choice) except ValueError: return False return 0 < choice <= length def choice_prompt(l,",
"'host') username, token_or_password = p.getset_host_credentials( self.host, only_token=True) self.api = Api(username=username, password=<PASSWORD>, host=self.host, path_to_tx=p.txrc_file)",
"os.path.splitext(filename) try: formats = self.api.get('formats') except Exception as e: logger.error(e) raise def display_format(v):",
"parser's options with the user input. Options `local` and `execute` are by default",
"found no projects in this organization!\") first_time = False project_slug = choice_prompt(p_choices, 'projects')",
"options = { 'source_file': source_file, 'expression': expression, 'i18n_type': i18n_type, 'source_language': source_language, 'resource': resource,",
"True when interactive wizard is run. Returns: the options dictionary. \"\"\" TEXTS =",
"= \"CYAN\" try: import readline readline.set_completer_delims(' \\t\\n;') readline.parse_and_bind(\"tab: complete\") except ImportError: pass def",
"p['slug'] == project_slug][0] source_language = project['source_language']['code'] resource_slug = slugify(os.path.basename(source_file)) resource = '{}.{}'.format(project_slug, resource_slug)",
"project_slug == 'tx:new_project': print(messages.create_project_instructions.format( host=self.host, org=org_slug )) else: project = [p for p",
"e: logger.error(e) raise # return org list sorted by name return sorted( [(o['slug'],",
"k, v in formats.items() if extension in v['file-extensions']] if not formats: raise Exception(messages.TEXTS['formats']['empty'])",
"validate_int(choice, length): try: choice = int(choice) except ValueError: return False return 0 <"
] |
[
"tile \"\"\" @abstractmethod def __init__(self, location, name, price): \"\"\" :param location: (int) Location,",
"(int) Location, (0 - 39) on the monopoly board :param name: (String) Name",
"(Player Object) Player that is unmortgageing the tile \"\"\" self.is_mortgaged = False self.price",
"player: (Player Object) The player that landed on the tile \"\"\" if self.owner",
"Board.spaces[self.active_player.position].landed_on(self.active_player) def gain_money(self, amount): \"\"\" Give player money :param amount: (int) Amount of",
"that landed on card tile :return: calls draw_card \"\"\" self.active_player = player return",
"self.get_out_of_jail_free() elif key == 8: return self.go_back(3) elif key == 9: return self.go_to_jail()",
"player \"\"\" self.amount = int(amount) super().__init__(location, name) def landed_on(self, player): \"\"\" Takes amount",
"the player \"\"\" self.percent = float(percent) super().__init__(location, name) def landed_on(self, player): \"\"\" Charges",
"testing Board.default_board() class TileFactory: \"\"\" Creates all possible different tiles, used with read_in_board",
"amount * -1 self.pay_all_other_players(amount) def __str__(self): \"\"\" :return: (String) Easy to read tile",
"it to Free Parking :param player: (Player Object) Player that landed on tile",
"loop_value = False except FileNotFoundError: if file_name == \"Q\": quit() print(\"File Not found,",
"rent_tiers += str(tier) + ', ' owned_by = self.format_owner() output = \"{0} {1}",
"board :name: (String) Name of the location \"\"\" def __init__(self, location=10, name=\"JustVisiting\"): \"\"\"",
"print(\"You've gained $\", amount) self.active_player.money += amount def lose_money(self, amount): \"\"\" Takes player's",
"the file should be formatted as follows: Square# ClassType class data \"\"\" loop_value",
"(float or String) percent to tax the player \"\"\" self.percent = float(percent) super().__init__(location,",
"self.gain_money(100) else: return print(\"Bad Chance Card Draw\") class CommunityChest(Card): \"\"\" All Community Chest",
"elif key == 6: return self.get_money_from_all_other_players(50) elif key == 7: return self.gain_money(100) elif",
"price Sets owner to player Charges Player unmortgage price :param player: (Player Object)",
"elif class_type == 'JustVisiting': return {10: JustVisiting()} elif class_type == 'GoToJail': return {30:",
"Class ---- :amount: Amount to tax the player \"\"\" def __init__(self, location, name,",
"Attributes: ---- From Location Class ---- :location: (int) position, (0 - 39), on",
"Tile :param price: (Optional, int, default=150) purchase cost of the tile \"\"\" super().__init__(location,",
":number_of_houses: (int) Number of houses on the property, 0 - 5 Zero is",
"if isinstance(self.owner.owned_properites[key], Railroad): num_railroads_owned += 1 self.owner.exchange_money(player, cost[num_railroads_owned]) class Effect(ABC): \"\"\" Parent class",
"Monopoly Board Attributes: :spaces: (Dict) A dictionary where the key is the location",
"= input(\"Please enter the file Name: \") with open('boards/' + file_name) as file:",
"Effect(ABC): \"\"\" Parent class for all squares where an effect is applied. Including",
"data = [data[3], ] + last_part_data if class_type == \"Property\": return {position: Property(position,",
"purchase cost of the tile :owner: (UserEntity Object) Current Owner of the tile",
"(Player Object) Player that landed on the tile \"\"\" buy_or_pass = self.ask_buy_or_pass() if",
"5, 10, 20, 40, 80, 160]) for x in range(0, 40)} railroads =",
"cost for one house \"\"\" if location > 30: return 200 elif location",
"for {} I don't know how to make you pay everyone else... yet\".format(self.active_player))",
"landed_on(self, player): print(\"Landed on Go!\") class JustVisiting(CornerTile): \"\"\" Models Just Visiting (jail) tile",
"gets money from all other players :param amount: (int) amount gotten from other",
"full price Sets owner to player Charges Player unmortgage price :param player: (Player",
"1/2 price, Sets owner to Bank, \"\"\" self.is_mortgaged = True self.price = self.price",
"except TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass Type: \", data[1], \" Not Found!\") break except IndexError: data",
"No! you've been sent to jail!!\") self.active_player.position = 'jail' def house_repairs(self): \"\"\" Charges",
"def default_board(cls): \"\"\" Builds a default board for testing \"\"\" cls.spaces = {}",
"\"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax Amount: ${2}\"\\ .format(self.location, self.name, self.amount) return",
"self.name) return output class Go(CornerTile): \"\"\" Models GO Tile Attributes: ---- From CornerTile",
"Just Visiting (jail) tile Attributes: ---- From CornerTile Class ---- :location: (int) position,",
"a random random card and calls the appropriate method \"\"\" key = randint(0,",
"everyone else... yet\".format(self.active_player)) def get_money_from_all_other_players(self, amount): \"\"\" Active player gets money from all",
"unmortgageing the tile \"\"\" self.is_mortgaged = False self.price = self.price * 2 self.owner",
"return output class PercentTax(Effect): \"\"\" Charges player a set tax amount, is not",
":name: (String) Name of the location \"\"\" def __init__(self, location=10, name=\"JustVisiting\"): \"\"\" :param",
"self.draw_card() def draw_card(self): pass # -------------Card effects -------------- def advance_to_tile(self, tile_num): \"\"\" Moves",
"int, default=200) purchase cost of the tile \"\"\" super().__init__(location, name, price) def owned_by_player(self,",
"name): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly board",
"Number of tiles to be moved back \"\"\" self.active_player.position -= num_tiles print(\"You've been",
"will be affecting \"\"\" def __init__(self, location, name): \"\"\" :param location: (int) Location,",
"get_out_of_jail_free(self): \"\"\" Gives player a get out of jail free card \"\"\" print(\"You",
"---- :percent: percent to tax the player \"\"\" def __init__(self, location, name, percent):",
"their total wealth and gives it to free parking :param player: (Player Object)",
"+= 1 self.owner.exchange_money(player, roll * multiplier[num_utils_owned]) class Railroad(Location): \"\"\" Defines all 4 railroads",
"def landed_on(self, player): # TODO: find a way to print out what players",
"+= 1 def go_back(self, num_tiles): \"\"\" Moves player back specified number of spaces",
"From CornerTile Class ---- :location: (int) position, (0 - 39), on the monopoly",
"\"{0} {1}\" \\ \"\\n\\t{2}\".format(self.location, self.name, self.format_owner()) return output class Property(Location): \"\"\" Defines all",
"Name of the Tile \"\"\" super().__init__(location, name) def landed_on(self, player): print(\"Landed on Go!\")",
"monopoly board :name: (String) Name of the location ---- New In Card Class",
"For Each of the corner tiles Excluding Free Parking. Attributes: :location: (int) position,",
"function depending on who landed on the property and who owns the property",
"PercentTax(Effect): \"\"\" Charges player a set tax amount, is not dependant on the",
"def landed_on(self, player): \"\"\" Calls the proper function depending on who landed on",
"player a set tax amount, is not dependant on the player's wealth Attributes:",
"you, you have no houses\") else: print(\"You paid: $\", owed_money) Board.spaces[20].exchange_money(self.active_player, owed_money) def",
"self.lose_money(50) elif key == 12: return self.lose_money(150) elif key == 13: return self.gain_money(25)",
"CommunityChest(Card): \"\"\" All Community Chest Cards Attributes: ---- From Card Class ---- :location:",
"elif key == 4: return self.get_out_of_jail_free() elif key == 5: return self.go_to_jail() elif",
"player: (Player Object) Player that landed on tile \"\"\" Board.spaces[20].exchange_money(player, self.amount) def __str__(self):",
"price to 1/2 price, Sets owner to Bank, \"\"\" self.is_mortgaged = True self.price",
"enter the file Name: \") with open('boards/' + file_name) as file: for line",
"13: return self.advance_to_tile(39) elif key == 14: return self.pay_all_other_players(50) elif key == 15:",
"the content is the property \"\"\" spaces = {} @classmethod def default_board(cls): \"\"\"",
"{2}\" \\ .format(self.owner, self.price, self.is_mortgaged) return owned_by def __str__(self): \"\"\" :return: (String) Easy",
"players \"\"\" amount = amount * -1 self.pay_all_other_players(amount) def __str__(self): \"\"\" :return: (String)",
"self.price = self.price / 2 Bank.exchange_money(self.owner, self.price) self.owner = Bank def unmortgage(self, player):",
"+= str(tier) + ', ' owned_by = self.format_owner() output = \"{0} {1} {2}\"",
"on the monopoly board :param name: (String) Name of the Tile :param price:",
"= 'jail' print(\"Go To Jail!!!\") class Jail(CornerTile): def __init__(self, location='jail', name='jail'): super().__init__(location, name)",
"on the board, int from 0 to 39 :param property_data: (1x9 array-like) list",
"Player, Bank, FreeParking from InputValidation import get_yes_or_no_input from random import randint from Exceptions",
"player): pass def __str__(self): \"\"\" :return: (String) Description of the tile \"\"\" output",
"is the location of a tile and the content is the property \"\"\"",
"InputValidation import get_yes_or_no_input from random import randint from Exceptions import TilesClassNotFoundError class Location(ABC):",
"\"\"\" super().__init__(location, name, price) def owned_by_player(self, player): \"\"\" Charges player rent, transfers rent",
"go_to_jail(self): \"\"\" Sends the player to jail, player does not pass go and",
"\"\"\" def __init__(self, location, name): \"\"\" :param location: (int) Location, (0 - 39)",
"player): player.position = 'jail' print(\"Go To Jail!!!\") class Jail(CornerTile): def __init__(self, location='jail', name='jail'):",
"Player that landed on Percent Tax \"\"\" Board.spaces[20].exchange_money(player, player.money * self.percent) def __str__(self):",
"gotten from other players \"\"\" amount = amount * -1 self.pay_all_other_players(amount) def __str__(self):",
"method for all objects in spaces \"\"\" output = '' for key in",
"= None class SetTax(Effect): \"\"\" Charges player a set tax amount, is not",
"all other players print(\"Lucky for {} I don't know how to make you",
"{1}\".format(self.location, self.name) return output class Go(CornerTile): \"\"\" Models GO Tile Attributes: ---- From",
"landed_on(self, player): player.position = 'jail' print(\"Go To Jail!!!\") class Jail(CornerTile): def __init__(self, location='jail',",
"landed on Percent Tax \"\"\" Board.spaces[20].exchange_money(player, player.money * self.percent) def __str__(self): \"\"\" :return:",
"= name @abstractmethod def landed_on(self, player): pass class Card(Effect): \"\"\" Parent Class for",
"name: (String) Name of the Tile :param price: (int) purchase cost of the",
"\"\\n\\t{2}\".format(self.location, self.name, self.format_owner()) return output class Property(Location): \"\"\" Defines all the Properties on",
":return: (int) cost for one house \"\"\" if location > 30: return 200",
"== 2: return self.lose_money(50) elif key == 3: return self.gain_money(50) elif key ==",
"self.gain_money(150) elif key == 16: return self.gain_money(100) else: return print(\"Bad Chance Card Draw\")",
"isinstance(self.owner.owned_properites[key], Railroad): num_railroads_owned += 1 self.owner.exchange_money(player, cost[num_railroads_owned]) class Effect(ABC): \"\"\" Parent class for",
"in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Railroad): num_railroads_owned += 1 self.owner.exchange_money(player, cost[num_railroads_owned]) class Effect(ABC): \"\"\"",
"owned_by = self.format_owner() output = \"{0} {1} {2}\" \\ \"\\n\\t{3}\" \\ \"\\n\\tCost Per",
"elif key == 4: return self.advance_to_next(Railroad) elif key == 5: return self.advance_to_next(Railroad) elif",
"] + last_part_data if class_type == \"Property\": return {position: Property(position, name, data)} elif",
"output = \"{0} {1}\".format(self.location, self.name) return output class Go(CornerTile): \"\"\" Models GO Tile",
"elif key == 5: return self.go_to_jail() elif key == 6: return self.get_money_from_all_other_players(50) elif",
"\"FreeParking\": return {position: FreeParking(position)} elif class_type == \"Go\": return {0: Go()} elif class_type",
"location: (int) Location, (0 - 39) on the monopoly board :param name: (String)",
"(0 - 39) on the monopoly board :param name: (Optional, String, default=GO) Name",
"= location self.name = name @abstractmethod def landed_on(self, player): pass def __str__(self): \"\"\"",
"you like to buy \" + self.name + \" for $\" + str(self.price)",
"total wealth and gives it to free parking :param player: (Player Object) Player",
"* 2 def owned_by_player(self, player): \"\"\" Charges player rent, transfers rent between owner",
"self.owner = Bank self.is_mortgaged = False super().__init__() def landed_on(self, player): \"\"\" Calls the",
"is not None: position = int(data[0]) class_type = data[1] name = data[2] try:",
"board :param name: (Optional, String, default=JustVisiting) Name of the Tile \"\"\" super().__init__(location, name)",
"of the location ---- New in PercentTax Class ---- :percent: percent to tax",
"(String) Easy to read tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\t{2}\".format(self.location,",
"class_type == \"Chance\": return {position: Chance(position, name)} elif class_type == \"CommunityChest\": return {position:",
"\"\"\" Takes player's money :param amount: (int) amount of money to take from",
"for testing Board.default_board() class TileFactory: \"\"\" Creates all possible different tiles, used with",
"cls.spaces[key].__str__() return output # construct the default board for testing Board.default_board() class TileFactory:",
"and player :param player: (Player Object) Player that landed on tile \"\"\" num_utils_owned",
"like to buy \"\"\" buy_or_pass = get_yes_or_no_input( \"Would you like to buy \"",
"they would like to purchase the property, displays the Name and price :return:",
"Attributes: :spaces: (Dict) A dictionary where the key is the location of a",
"== 15: return self.gain_money(150) elif key == 16: return self.gain_money(100) else: return print(\"Bad",
"New In Card Class ---- :active_player: (Player Object) Player that the card will",
"9: return self.go_to_jail() elif key == 10: return self.house_repairs() elif key == 11:",
"for __str__() :return: (String) Easy to read owner information \"\"\" if isinstance(self.owner, Bank):",
"the Tile \"\"\" self.active_player = None super().__init__(location, name) def landed_on(self, player): \"\"\" Sets",
"\"\"\" def __init__(self, location, name, price=150): \"\"\" :param location: (int) Location, (0 -",
"39) on the monopoly board :param name: (String) Name of the Tile :param",
"on the tile \"\"\" buy_or_pass = self.ask_buy_or_pass() if buy_or_pass: # buy player.money =",
"property :number_of_houses: (int) Number of houses on the property, 0 - 5 Zero",
"Class for Chance and Community Chest Cards Attributes: ---- From Effect Class ----",
"= False while not isinstance( Board.spaces[location_to_check], class_type): location_to_check += 1 if location_to_check >",
"the monopoly board :param name: (String) Name of the Tile \"\"\" super().__init__(location, name)",
"all the Properties on the board Does not include railroads or utilities Attributes:",
"3: return self.gain_money(50) elif key == 4: return self.get_out_of_jail_free() elif key == 5:",
"one house \"\"\" def __init__(self, location, name, property_data): \"\"\" :param location: (Int) position",
"Type: \", data[1], \" Not Found!\") break except IndexError: data = None class",
"---- New in SetTax Class ---- :amount: Amount to tax the player \"\"\"",
":rent: (1x6 array-like) Rent tiers for the property :number_of_houses: (int) Number of houses",
"2 def owned_by_player(self, player): \"\"\" Charges player rent, transfers rent between owner and",
"Advances active player to the next tile of specified class type :param class_type:",
"self.get_money_from_all_other_players(50) elif key == 7: return self.gain_money(100) elif key == 8: return self.gain_money(20)",
"tiles landed_on method. :param num_tiles: (int) Number of tiles to be moved back",
"\"Property\": return {position: Property(position, name, data)} elif class_type == \"Utility\": return {position: Utility(position,",
"7: return self.gain_money(100) elif key == 8: return self.gain_money(20) elif key == 9:",
"to quit\\n\") @classmethod def __str__(cls): \"\"\" :return: (String) Formatted __str__ method for all",
"of one house \"\"\" def __init__(self, location, name, property_data): \"\"\" :param location: (Int)",
"self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Utility): num_utils_owned += 1 self.owner.exchange_money(player, roll * multiplier[num_utils_owned]) class Railroad(Location):",
"key == 7: return self.gain_money(100) elif key == 8: return self.gain_money(20) elif key",
"property_data[0] self.rent = property_data[2:] self.number_of_houses = 0 self.cost_per_house = self.set_cost_per_house(location) super().__init__(location, name, int(property_data[1]))",
"__str__(self): \"\"\" :return: (String) Description of the tile \"\"\" output = \"{0} {1}\".format(self.location,",
"Jail()} else: raise TilesClassNotFoundError except TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass Type: \", data[1], \" Not Found!\")",
"$\", owed_money) Board.spaces[20].exchange_money(self.active_player, owed_money) def pay_all_other_players(self, amount): \"\"\" Active player pays all other",
"return 200 elif location > 20: return 150 elif location > 10: return",
"(int) Price of one house \"\"\" def __init__(self, location, name, property_data): \"\"\" :param",
"last_part_data if class_type == \"Property\": return {position: Property(position, name, data)} elif class_type ==",
"price, Sets owner to Bank, \"\"\" self.is_mortgaged = True self.price = self.price /",
"from 0 to 39 :param property_data: (1x9 array-like) list with various data formatted",
"displays the Name and price :return: (Boolean) True if the player would like",
"In Card Class ---- :active_player: (Player Object) Player that the card will be",
"key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Railroad): num_railroads_owned += 1 self.owner.exchange_money(player, cost[num_railroads_owned]) class Effect(ABC):",
"self.rent = property_data[2:] self.number_of_houses = 0 self.cost_per_house = self.set_cost_per_house(location) super().__init__(location, name, int(property_data[1])) @staticmethod",
"draw_card(self): pass # -------------Card effects -------------- def advance_to_tile(self, tile_num): \"\"\" Moves player to",
"self.is_mortgaged = False self.price = self.price * 2 self.owner = player self.owner.exchange_money(self.owner, self.price",
"amount def lose_money(self, amount): \"\"\" Takes player's money :param amount: (int) amount of",
"def __init__(self, location=0, name='GO'): \"\"\" :param location: (int) Location, (0 - 39) on",
"new_tile = TileFactory.create_tile(data) cls.spaces.update(new_tile) loop_value = False except FileNotFoundError: if file_name == \"Q\":",
"for all squares where an effect is applied. Including Chance, Community Chest, Income",
"for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Railroad): num_railroads_owned += 1 self.owner.exchange_money(player, cost[num_railroads_owned]) class",
"quit\\n\") @classmethod def __str__(cls): \"\"\" :return: (String) Formatted __str__ method for all objects",
"\"\"\" self.location = location self.name = name self.price = price self.owner = Bank",
"elif key == 3: return self.advance_to_next(Utility) elif key == 4: return self.advance_to_next(Railroad) elif",
"Defines all the Properties on the board Does not include railroads or utilities",
"20: return 150 elif location > 10: return 100 else: return 50 def",
"{1} {2}\" \\ \"\\n\\t{3}\" \\ \"\\n\\tCost Per House: {4}, Number Of Houses: {5}\"",
"how to make you pay everyone else... yet\".format(self.active_player)) def get_money_from_all_other_players(self, amount): \"\"\" Active",
"updates owner, and sets is_mortgaged to False :param player: (Player Object) Player that",
"player percent of their total wealth and gives it to free parking :param",
"to buy \" + self.name + \" for $\" + str(self.price) + \"?",
"an effect is applied. Including Chance, Community Chest, Income tax, etc. Attributes: :location:",
"the property, 0 - 5 Zero is No houses Five is a hotel",
"Charges player a set tax amount, is not dependant on the player's wealth",
"@abstractmethod def __init__(self, location, name, price): \"\"\" :param location: (int) Location, (0 -",
"2 self.owner = player self.owner.exchange_money(self.owner, self.price * -.75) def format_owner(self): \"\"\" Formats current",
"except FileNotFoundError: if file_name == \"Q\": quit() print(\"File Not found, please try again.\\n\\tOr",
"-------------Card effects -------------- def advance_to_tile(self, tile_num): \"\"\" Moves player to specified tile and",
"return self.advance_to_tile(5) elif key == 13: return self.advance_to_tile(39) elif key == 14: return",
"return 50 def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\"",
"and the content is the property \"\"\" spaces = {} @classmethod def default_board(cls):",
"\"\"\" Class that sends people to jail \"\"\" def __init__(self, location=30, name='Go To",
"Name of the location ---- New In Card Class ---- :active_player: (Player Object)",
"key == 15: return self.gain_money(150) elif key == 16: return self.gain_money(100) else: return",
"the Tile \"\"\" super().__init__(location, name) def landed_on(self, player): # TODO: find a way",
"import ABC, abstractmethod from UserEntity import Player, Bank, FreeParking from InputValidation import get_yes_or_no_input",
"import get_yes_or_no_input from random import randint from Exceptions import TilesClassNotFoundError class Location(ABC): \"\"\"",
"on the monopoly board :param name: (Optional, String, default=GO) Name of the Tile",
"return self.advance_to_tile(0) elif key == 1: return self.advance_to_tile(24) elif key == 2: return",
"\"\"\" Charges player rent, transfers rent between owner and player :param player: (Player",
"owner to player Charges Player unmortgage price :param player: (Player Object) Player that",
"12: return self.advance_to_tile(5) elif key == 13: return self.advance_to_tile(39) elif key == 14:",
"name, price): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly",
"on the tile \"\"\" if self.owner == Bank: self.owned_by_bank(player) elif self.owner != player:",
"- 39), on the monopoly board :name: (String) Name of the location ----",
"appropriate method \"\"\" key = randint(0, 16) if key == 0: return self.advance_to_tile(0)",
"spaces = {} @classmethod def default_board(cls): \"\"\" Builds a default board for testing",
"{} streets = {x: Property(x, \"Name\", [\"Color\", 150, 5, 10, 20, 40, 80,",
":param player: (Player Object) Player that landed on Percent Tax \"\"\" Board.spaces[20].exchange_money(player, player.money",
"the Tile :param price: (Optional, int, default=150) purchase cost of the tile \"\"\"",
"of the location \"\"\" @abstractmethod def __init__(self, location, name): \"\"\" :param location: (int)",
"(int) amount to pay other players \"\"\" # TODO: implement pay all other",
"class_type: (Object) class of tile to advance to examples: Railroad, Utility, Card \"\"\"",
"player will be moved to \"\"\" # Checks if player will pass go",
"\"\"\" key = randint(0, 16) if key == 0: return self.advance_to_tile(0) elif key",
"the monopoly board :name: (String) Name of the location ---- New in SetTax",
"\"\\n\\t you now have \", self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards += 1 def go_back(self, num_tiles): \"\"\"",
"TilesClassNotFoundError except TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass Type: \", data[1], \" Not Found!\") break except IndexError:",
"return self.gain_money(100) else: print(\"bad CC draw\") class Board(object): \"\"\" The Monopoly Board Attributes:",
"data is not None: position = int(data[0]) class_type = data[1] name = data[2]",
"GoToJail()} elif class_type == 'jail': return {'jail': Jail()} else: raise TilesClassNotFoundError except TilesClassNotFoundError:",
"rent_hotel] \"\"\" self.color = property_data[0] self.rent = property_data[2:] self.number_of_houses = 0 self.cost_per_house =",
"self.active_player.get_out_of_jail_cards += 1 def go_back(self, num_tiles): \"\"\" Moves player back specified number of",
"== 8: return self.go_back(3) elif key == 9: return self.go_to_jail() elif key ==",
"Name of the Tile :param percent: (float or String) percent to tax the",
"price: (Optional, int, default=150) purchase cost of the tile \"\"\" super().__init__(location, name, price)",
"of the location \"\"\" def __init__(self, location=0, name='GO'): \"\"\" :param location: (int) Location,",
"\"\"\" buy_or_pass = get_yes_or_no_input( \"Would you like to buy \" + self.name +",
"__init__(self, location, name, property_data): \"\"\" :param location: (Int) position on the board, int",
"will pass go if self.active_player.position >= tile_num: self.active_player.money += 200 self.active_player.position = tile_num",
"return self.house_repairs() elif key == 15: return self.gain_money(10) elif key == 16: return",
"= randint(1, 6) for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Utility): num_utils_owned += 1",
"!= player: self.owned_by_player(player) def owned_by_bank(self, player): \"\"\" Gives the player the option to",
"random import randint from Exceptions import TilesClassNotFoundError class Location(ABC): \"\"\" Abstract Parent Class",
"you now have \", self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards += 1 def go_back(self, num_tiles): \"\"\" Moves",
"card will be affecting \"\"\" def __init__(self, location, name): \"\"\" :param location: (int)",
"are expensive!\") if owed_money == 0: print(\"Lucky for you, you have no houses\")",
"Chance(Card): \"\"\" All Chance Cards Attributes: ---- From Card Class ---- :location: (int)",
"\"\"\" Defines all the Properties on the board Does not include railroads or",
"Give player money :param amount: (int) Amount of money to give active player",
"cost of the tile \"\"\" super().__init__(location, name, price) def owned_by_player(self, player): \"\"\" Charges",
"== \"Utility\": return {position: Utility(position, name)} elif class_type == \"Railroad\": return {position: Railroad(position,",
"percent to tax the player \"\"\" self.percent = float(percent) super().__init__(location, name) def landed_on(self,",
"skipped pass print(\"House repairs are expensive!\") if owed_money == 0: print(\"Lucky for you,",
"class_type == \"Go\": return {0: Go()} elif class_type == 'JustVisiting': return {10: JustVisiting()}",
"self.set_cost_per_house(location) super().__init__(location, name, int(property_data[1])) @staticmethod def set_cost_per_house(location): \"\"\" Determines the price for one",
"the tile \"\"\" self.location = location self.name = name self.price = price self.owner",
"def __init__(self, location=10, name=\"JustVisiting\"): \"\"\" :param location: (int) Location, (0 - 39) on",
"Defines all utilities i.e. Electric Company and Water Works Attributes: ---- From Location",
"if buy_or_pass: # buy player.money = player.money - self.price self.owner = player player.owned_properites.update({self.location:",
"of the Tile \"\"\" self.location = location self.name = name @abstractmethod def landed_on(self,",
"return owned_by def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\"",
"def draw_card(self): \"\"\" Chooses a random random card and calls the appropriate method",
"\"\"\" loop_value = True while loop_value: try: if get_yes_or_no_input('Would You Like To Use",
"in jail print(\"Just Visiting Jail\") class GoToJail(CornerTile): \"\"\" Class that sends people to",
"1 if location_to_check > 39: location_to_check = location_to_check % 40 passed_go = True",
"self.owner = player player.owned_properites.update({self.location: self}) self.is_mortgaged = False self.price = self.price * 2",
"on card tile :return: calls draw_card \"\"\" self.active_player = player return self.draw_card() def",
"to the next tile of specified class type :param class_type: (Object) class of",
"tax, etc. Attributes: :location: (int) position, (0 - 39), on the monopoly board",
"def unmortgage(self, player): \"\"\" Sets is_mortgaged to False, Sets price to full price",
"self.go_to_jail() elif key == 10: return self.house_repairs() elif key == 11: return self.lose_money(15)",
"if location_to_check > 39: location_to_check = location_to_check % 40 passed_go = True self.active_player.position",
"class data \"\"\" loop_value = True while loop_value: try: if get_yes_or_no_input('Would You Like",
"landed on the tile \"\"\" if self.owner == Bank: self.owned_by_bank(player) elif self.owner !=",
"owned_by def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\" output",
"self.advance_to_tile(5) elif key == 13: return self.advance_to_tile(39) elif key == 14: return self.pay_all_other_players(50)",
"location self.name = name @abstractmethod def landed_on(self, player): pass class Card(Effect): \"\"\" Parent",
"'jail': return {'jail': Jail()} else: raise TilesClassNotFoundError except TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass Type: \", data[1],",
"Price, rent, rent_1_house, ..., rent_hotel] \"\"\" self.color = property_data[0] self.rent = property_data[2:] self.number_of_houses",
"> 10: return 100 else: return 50 def __str__(self): \"\"\" :return: (String) Easy",
"to read tile description \"\"\" rent_tiers = '' for tier in self.rent: rent_tiers",
"player: (Player Object) Player that landed on tile \"\"\" num_railroads_owned = 0 cost",
"output class PercentTax(Effect): \"\"\" Charges player a set tax amount, is not dependant",
"\"\"\" Models Just Visiting (jail) tile Attributes: ---- From CornerTile Class ---- :location:",
"board from file. Each line in the file should be formatted as follows:",
"houses Five is a hotel :cost_per_house: (int) Price of one house \"\"\" def",
"utilities i.e. Electric Company and Water Works Attributes: ---- From Location Class ----",
"monopoly board :param name: (String) Name of the Tile \"\"\" self.location = location",
"return print(\"Bad Chance Card Draw\") class CommunityChest(Card): \"\"\" All Community Chest Cards Attributes:",
"the monopoly board :name: (String) Name of the location \"\"\" def __init__(self, location=10,",
"1 self.owner.exchange_money(player, roll * multiplier[num_utils_owned]) class Railroad(Location): \"\"\" Defines all 4 railroads Attributes:",
"name) def landed_on(self, player): print(\"Landed on Go!\") class JustVisiting(CornerTile): \"\"\" Models Just Visiting",
"(0 - 39), on the monopoly board :name: (String) Name of the location",
"num_tiles, \"tiles.\", \"\\nYou're now on tile number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on() def go_to_jail(self):",
"Water Works Attributes: ---- From Location Class ---- :location: (int) position, (0 -",
":param player: (Player Object) Player that landed on card tile :return: calls draw_card",
"monopoly board :name: (String) Name of the location \"\"\" def __init__(self, location=0, name='GO'):",
"for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Utility): num_utils_owned += 1 self.owner.exchange_money(player, roll *",
"to pay other players \"\"\" # TODO: implement pay all other players print(\"Lucky",
"= TileFactory.create_tile(data) cls.spaces.update(new_tile) loop_value = False except FileNotFoundError: if file_name == \"Q\": quit()",
"(int) Number of tiles to be moved back \"\"\" self.active_player.position -= num_tiles print(\"You've",
"from UserEntity import Player, Bank, FreeParking from InputValidation import get_yes_or_no_input from random import",
"file: if not line.startswith('#'): data = line.split() new_tile = TileFactory.create_tile(data) cls.spaces.update(new_tile) loop_value =",
"elif class_type == \"Go\": return {0: Go()} elif class_type == 'JustVisiting': return {10:",
"return Board.spaces[self.active_player.position].landed_on(self.active_player) def advance_to_next(self, class_type): \"\"\" Advances active player to the next tile",
"Card Draw\") class CommunityChest(Card): \"\"\" All Community Chest Cards Attributes: ---- From Card",
"Rent tiers for the property :number_of_houses: (int) Number of houses on the property,",
"would like to buy False if the player would not like to buy",
"class_type == \"Utility\": return {position: Utility(position, name)} elif class_type == \"Railroad\": return {position:",
"tile of specified class type :param class_type: (Object) class of tile to advance",
"---- From Effect Class ---- :location: (int) position, (0 - 39), on the",
"player.money * self.percent) def __str__(self): \"\"\" :return: (String) Easy to read tile description",
"location_to_check if passed_go: self.active_player.money += 200 print(\"You've advanced to the next \", str(class_type),",
"- 39) on the monopoly board :param name: (Optional, String, default=JustVisiting) Name of",
"(Optional, String, default=JustVisiting) Name of the Tile \"\"\" super().__init__(location, name) def landed_on(self, player):",
"data: Data read in from a file :return: A tile to be added",
"class_type == \"CommunityChest\": return {position: CommunityChest(position, name)} elif class_type == \"SetTax\": return {position:",
"file_name = 'StandardBoard' else: file_name = input(\"Please enter the file Name: \") with",
"is_mortgaged to True, Gives owner mortgage value (1/2 price), Sets price to 1/2",
"community_chest = {x: CommunityChest(x, \"Community Chest Card\") for x in [2, 17, 33]}",
"owner information \"\"\" if isinstance(self.owner, Bank): owned_by = \"Owner: {0}, Current Rent {1}\"",
"where the key is the location of a tile and the content is",
"% 40 passed_go = True self.active_player.position = location_to_check if passed_go: self.active_player.money += 200",
"Current Owner of the tile :is_mortgaged: (Boolean) mortgage state of the tile ----",
"if file_name == \"Q\": quit() print(\"File Not found, please try again.\\n\\tOr Enter Q",
"player would like to buy False if the player would not like to",
"Sets price to 1/2 price, Sets owner to Bank, \"\"\" self.is_mortgaged = True",
"[2, 17, 33]} free_parking = {20: FreeParking()} cls.spaces.update(streets) cls.spaces.update(railroads) cls.spaces.update(utilities) cls.spaces.update(chances) cls.spaces.update(community_chest) cls.spaces.update(free_parking)",
"= amount * -1 self.pay_all_other_players(amount) def __str__(self): \"\"\" :return: (String) Easy to read",
":param name: (String) Name of the Tile \"\"\" self.active_player = None super().__init__(location, name)",
"print(\"You paid: $\", owed_money) Board.spaces[20].exchange_money(self.active_player, owed_money) def pay_all_other_players(self, amount): \"\"\" Active player pays",
"9: return self.get_money_from_all_other_players(10) elif key == 10: return self.gain_money(100) elif key == 11:",
"got a get out of jail free card\", \"\\n\\t you now have \",",
"\"Would you like to buy \" + self.name + \" for $\" +",
"does not collect $200 \"\"\" print(\"Oh No! you've been sent to jail!!\") self.active_player.position",
"\"\"\" Defines all utilities i.e. Electric Company and Water Works Attributes: ---- From",
"CornerTile Class ---- :location: (int) position, (0 - 39), on the monopoly board",
"amount to tax the player \"\"\" self.amount = int(amount) super().__init__(location, name) def landed_on(self,",
"self.amount) def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\" output",
"name, amount): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly",
"if isinstance(self.owner, Bank): owned_by = \"Owner: {0}, Current Rent {1}\" \\ .format(self.owner, self.format_current_rent())",
"Takes player's money :param amount: (int) amount of money to take from active",
"- 5 Zero is No houses Five is a hotel :cost_per_house: (int) Price",
"owns the property :param player: (Player Object) The player that landed on the",
"Attributes: ---- From Effect Class ---- :location: (int) position, (0 - 39), on",
"money to give active player \"\"\" print(\"You've gained $\", amount) self.active_player.money += amount",
"next tile of specified class type :param class_type: (Object) class of tile to",
"tax amount, is not dependant on the player's wealth Attributes: ---- From Effect",
"'jail' def house_repairs(self): \"\"\" Charges player house repairs \"\"\" owed_money = 0 for",
"name: (String) Name of the Tile :param price: (Optional, int, default=150) purchase cost",
"def mortgage(self): \"\"\" Sets is_mortgaged to True, Gives owner mortgage value (1/2 price),",
"Parent Class for Chance and Community Chest Cards Attributes: ---- From Effect Class",
"Utility(Location): \"\"\" Defines all utilities i.e. Electric Company and Water Works Attributes: ----",
"-= num_tiles print(\"You've been sent back \", num_tiles, \"tiles.\", \"\\nYou're now on tile",
"39: location_to_check = location_to_check % 40 passed_go = True self.active_player.position = location_to_check if",
"print(\"Bad Chance Card Draw\") class CommunityChest(Card): \"\"\" All Community Chest Cards Attributes: ----",
"Bank): owned_by = \"Owner: {0}, Current Rent {1}\" \\ .format(self.owner, self.format_current_rent()) else: owned_by",
"class type :param class_type: (Object) class of tile to advance to examples: Railroad,",
"and gives it to free parking :param player: (Player Object) Player that landed",
"price: (int) purchase cost of the tile \"\"\" self.location = location self.name =",
"you've been sent to jail!!\") self.active_player.position = 'jail' def house_repairs(self): \"\"\" Charges player",
":percent: percent to tax the player \"\"\" def __init__(self, location, name, percent): \"\"\"",
"return self.get_money_from_all_other_players(10) elif key == 10: return self.gain_money(100) elif key == 11: return",
"in Property Class ---- :color: (String) Color of the property :rent: (1x6 array-like)",
"location_to_check % 40 passed_go = True self.active_player.position = location_to_check if passed_go: self.active_player.money +=",
"the player's wealth Attributes: ---- From Effect Class ---- :location: (int) position, (0",
"lose_money(self, amount): \"\"\" Takes player's money :param amount: (int) amount of money to",
"for $\" + str(self.price) + \"? y/n\") return buy_or_pass def mortgage(self): \"\"\" Sets",
"key == 0: return self.advance_to_tile(0) elif key == 1: return self.gain_money(200) elif key",
"= 'StandardBoard' else: file_name = input(\"Please enter the file Name: \") with open('boards/'",
"= \"{0} {1}\" \\ \"\\n\\tTax Amount: ${2}\"\\ .format(self.location, self.name, self.amount) return output class",
"Parent Class for all locations on the board Attributes: :location: (int) position, (0",
"None: position = int(data[0]) class_type = data[1] name = data[2] try: data =",
"\"\"\" self.owner.exchange_money(player, self.rent[self.number_of_houses]) def ask_buy_or_pass(self): \"\"\" Asks the player if they would like",
"Effect Class ---- :location: (int) position, (0 - 39), on the monopoly board",
"the location :price: (int) purchase cost of the tile :owner: (UserEntity Object) Current",
"= \"Owner: {0}, Price: {1}, Morgaged: {2}\" \\ .format(self.owner, self.price, self.is_mortgaged) return owned_by",
"output class Go(CornerTile): \"\"\" Models GO Tile Attributes: ---- From CornerTile Class ----",
"(1x6 array-like) Rent tiers for the property :number_of_houses: (int) Number of houses on",
"+= 1 if location_to_check > 39: location_to_check = location_to_check % 40 passed_go =",
"property_data): \"\"\" :param location: (Int) position on the board, int from 0 to",
"elif key == 16: return self.gain_money(100) else: return print(\"Bad Chance Card Draw\") class",
"== Bank: self.owned_by_bank(player) elif self.owner != player: self.owned_by_player(player) def owned_by_bank(self, player): \"\"\" Gives",
"= [data[3], ] + last_part_data if class_type == \"Property\": return {position: Property(position, name,",
"Railroad(x, \"Name\") for x in [5, 15, 25, 35]} utilities = {x: Utility(x,",
"super().__init__(location, name, int(property_data[1])) @staticmethod def set_cost_per_house(location): \"\"\" Determines the price for one house",
"{x: CommunityChest(x, \"Community Chest Card\") for x in [2, 17, 33]} free_parking =",
"return self.lose_money(150) elif key == 13: return self.gain_money(25) elif key == 14: return",
"name) def landed_on(self, player): \"\"\" Sets Active player to player, then calls draw_card()",
"{30: GoToJail()} elif class_type == 'jail': return {'jail': Jail()} else: raise TilesClassNotFoundError except",
"owed_money += 25 * hold except AttributeError: # Corner Tiles have no attribute",
"tile \"\"\" self.owner.exchange_money(player, self.rent[self.number_of_houses]) def ask_buy_or_pass(self): \"\"\" Asks the player if they would",
"house \"\"\" if location > 30: return 200 elif location > 20: return",
"owner, and sets is_mortgaged to False :param player: (Player Object) Player that landed",
"String, default=JustVisiting) Name of the Tile \"\"\" super().__init__(location, name) def landed_on(self, player): #",
"calls the appropriate method \"\"\" key = randint(0, 16) if key == 0:",
"if key == 0: return self.advance_to_tile(0) elif key == 1: return self.gain_money(200) elif",
":color: (String) Color of the property :rent: (1x6 array-like) Rent tiers for the",
"+= 1 self.owner.exchange_money(player, cost[num_railroads_owned]) class Effect(ABC): \"\"\" Parent class for all squares where",
"\"\"\" All Chance Cards Attributes: ---- From Card Class ---- :location: (int) position,",
"of spaces and calls that tiles landed_on method. :param num_tiles: (int) Number of",
"amount :param amount: (int) amount to pay other players \"\"\" # TODO: implement",
"amount) def get_out_of_jail_free(self): \"\"\" Gives player a get out of jail free card",
"Attributes: ---- From Card Class ---- :location: (int) position, (0 - 39), on",
"player that landed on the tile \"\"\" if self.owner == Bank: self.owned_by_bank(player) elif",
"Parent class for all squares where an effect is applied. Including Chance, Community",
"return self.advance_to_next(Railroad) elif key == 5: return self.advance_to_next(Railroad) elif key == 6: return",
"\"\"\" output = \"{0} {1}\".format(self.location, self.name) return output class Go(CornerTile): \"\"\" Models GO",
"Utility(x, \"Name\") for x in [12, 28]} chances = {x: Chance(x, \"Chance Card\")",
"Sets owner to player Charges Player unmortgage price :param player: (Player Object) Player",
"return {position: Railroad(position, name)} elif class_type == \"Chance\": return {position: Chance(position, name)} elif",
"\"{0} {1}\" \\ \"\\n\\tTax percent: {2}%\"\\ .format(self.location, self.name, self.percent) class CornerTile(ABC): \"\"\" Parent",
"class Board(object): \"\"\" The Monopoly Board Attributes: :spaces: (Dict) A dictionary where the",
"Player that landed on tile \"\"\" num_utils_owned = 0 multiplier = {1: 4,",
"\"\"\" amount = amount * -1 self.pay_all_other_players(amount) def __str__(self): \"\"\" :return: (String) Easy",
"price: (Optional, int, default=200) purchase cost of the tile \"\"\" super().__init__(location, name, price)",
"the tile \"\"\" self.is_mortgaged = False self.price = self.price * 2 self.owner =",
"roll = randint(1, 6) for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Utility): num_utils_owned +=",
"location > 10: return 100 else: return 50 def __str__(self): \"\"\" :return: (String)",
"read tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax percent: {2}%\"\\ .format(self.location,",
"for x in [5, 15, 25, 35]} utilities = {x: Utility(x, \"Name\") for",
"location: (int) Location, (0 - 39) on the monopoly board :param name: (Optional,",
":param player: (Player Object) Player that landed on tile \"\"\" Board.spaces[20].exchange_money(player, self.amount) def",
"\"\"\" Active player pays all other players specified amount :param amount: (int) amount",
"been moved to :\", Board.spaces[tile_num], \"\\n\\tTile Number:\", tile_num) return Board.spaces[self.active_player.position].landed_on(self.active_player) def advance_to_next(self, class_type):",
":name: (String) Name of the location ---- New in SetTax Class ---- :amount:",
"information for __str__() :return: (String) Easy to read owner information \"\"\" if isinstance(self.owner,",
"return {0: Go()} elif class_type == 'JustVisiting': return {10: JustVisiting()} elif class_type ==",
"return self.pay_all_other_players(50) elif key == 15: return self.gain_money(150) elif key == 16: return",
"x in data[4:]] data = [data[3], ] + last_part_data if class_type == \"Property\":",
"\"\"\" Sets is_mortgaged to False, Sets price to full price Sets owner to",
"(int) Tile the active player will be moved to \"\"\" # Checks if",
"(String) Color of the property :rent: (1x6 array-like) Rent tiers for the property",
"print(\"You've lost $\", amount) Board.spaces[20].exchange_money(self.active_player, amount) def get_out_of_jail_free(self): \"\"\" Gives player a get",
"return self.lose_money(50) elif key == 12: return self.lose_money(150) elif key == 13: return",
"cls.spaces = {} streets = {x: Property(x, \"Name\", [\"Color\", 150, 5, 10, 20,",
"Name of the Tile \"\"\" super().__init__(location, name) def landed_on(self, player): # TODO: find",
"purchase cost of the tile \"\"\" self.location = location self.name = name self.price",
"\" Not Found!\") break except IndexError: data = None class SetTax(Effect): \"\"\" Charges",
"self.price / 2 Bank.exchange_money(self.owner, self.price) self.owner = Bank def unmortgage(self, player): \"\"\" Sets",
"Property Class ---- :color: (String) Color of the property :rent: (1x6 array-like) Rent",
"else: print(\"You paid: $\", owed_money) Board.spaces[20].exchange_money(self.active_player, owed_money) def pay_all_other_players(self, amount): \"\"\" Active player",
"\"\"\" if isinstance(self.owner, Bank): owned_by = \"Owner: {0}, Current Rent {1}\" \\ .format(self.owner,",
":param name: (String) Name of the Tile :param price: (Optional, int, default=200) purchase",
"Name of the Tile :param price: (Optional, int, default=150) purchase cost of the",
"print(\"Go To Jail!!!\") class Jail(CornerTile): def __init__(self, location='jail', name='jail'): super().__init__(location, name) def landed_on(self,",
"Board?'): file_name = 'StandardBoard' else: file_name = input(\"Please enter the file Name: \")",
"not collect $200 \"\"\" print(\"Oh No! you've been sent to jail!!\") self.active_player.position =",
"player): pass class Card(Effect): \"\"\" Parent Class for Chance and Community Chest Cards",
"player: (Player Object) Player that landed on card tile :return: calls draw_card \"\"\"",
"return output class Chance(Card): \"\"\" All Chance Cards Attributes: ---- From Card Class",
"\"{0} {1}\".format(self.location, self.name) return output class Chance(Card): \"\"\" All Chance Cards Attributes: ----",
"{1}\" \\ .format(self.owner, self.format_current_rent()) else: owned_by = \"Owner: {0}, Price: {1}, Morgaged: {2}\"",
"Including Chance, Community Chest, Income tax, etc. Attributes: :location: (int) position, (0 -",
"elif key == 7: self.get_out_of_jail_free() elif key == 8: return self.go_back(3) elif key",
"== 8: return self.gain_money(20) elif key == 9: return self.get_money_from_all_other_players(10) elif key ==",
"amount) self.active_player.money += amount def lose_money(self, amount): \"\"\" Takes player's money :param amount:",
"# Corner Tiles have no attribute owner, skipped pass print(\"House repairs are expensive!\")",
"in Board.spaces: try: if Board.spaces[key].owner == self.active_player: hold = Board.spaces[key].number_of_houses owed_money += 25",
"player: (Player Object) Player that landed on tile \"\"\" num_utils_owned = 0 multiplier",
"owed_money == 0: print(\"Lucky for you, you have no houses\") else: print(\"You paid:",
"'GoToJail': return {30: GoToJail()} elif class_type == 'jail': return {'jail': Jail()} else: raise",
"player: (Player Object) Player that landed on Percent Tax \"\"\" Board.spaces[20].exchange_money(player, player.money *",
"location, name, price=200): \"\"\" :param location: (int) Location, (0 - 39) on the",
"== 9: return self.get_money_from_all_other_players(10) elif key == 10: return self.gain_money(100) elif key ==",
"= {x: Chance(x, \"Chance Card\") for x in [7, 22, 36]} community_chest =",
"cost[num_railroads_owned]) class Effect(ABC): \"\"\" Parent class for all squares where an effect is",
"\"\"\" print(\"You got a get out of jail free card\", \"\\n\\t you now",
"money from all other players :param amount: (int) amount gotten from other players",
"buy_or_pass def mortgage(self): \"\"\" Sets is_mortgaged to True, Gives owner mortgage value (1/2",
"landed on tile \"\"\" num_utils_owned = 0 multiplier = {1: 4, 2: 10}",
"* multiplier[num_utils_owned]) class Railroad(Location): \"\"\" Defines all 4 railroads Attributes: ---- From Location",
"location, name): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly",
"the monopoly board :name: (String) Name of the location \"\"\" @abstractmethod def __init__(self,",
"17, 33]} free_parking = {20: FreeParking()} cls.spaces.update(streets) cls.spaces.update(railroads) cls.spaces.update(utilities) cls.spaces.update(chances) cls.spaces.update(community_chest) cls.spaces.update(free_parking) @classmethod",
"{} @classmethod def default_board(cls): \"\"\" Builds a default board for testing \"\"\" cls.spaces",
"\"\"\" num_utils_owned = 0 multiplier = {1: 4, 2: 10} roll = randint(1,",
"to buy \"\"\" buy_or_pass = get_yes_or_no_input( \"Would you like to buy \" +",
"\\ \"\\n\\t{2}\".format(self.location, self.name, self.format_owner()) return output class Property(Location): \"\"\" Defines all the Properties",
"percent: {2}%\"\\ .format(self.location, self.name, self.percent) class CornerTile(ABC): \"\"\" Parent Class For Each of",
"owned_by_bank(self, player): \"\"\" Gives the player the option to purchase the tile, if",
"have no houses\") else: print(\"You paid: $\", owed_money) Board.spaces[20].exchange_money(self.active_player, owed_money) def pay_all_other_players(self, amount):",
"self.location = location self.name = name @abstractmethod def landed_on(self, player): pass class Card(Effect):",
"monopoly board :param name: (Optional, String, default=JustVisiting) Name of the Tile \"\"\" super().__init__(location,",
"50, 2: 100, 3: 150, 4: 200} for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key],",
"\"\"\" while True: try: if data is not None: position = int(data[0]) class_type",
"\"\"\" Active player gets money from all other players :param amount: (int) amount",
"board :param name: (String) Name of the Tile :param price: (int) purchase cost",
"unmortgage price :param player: (Player Object) Player that is unmortgageing the tile \"\"\"",
"No houses Five is a hotel :cost_per_house: (int) Price of one house \"\"\"",
"<reponame>KGB33/Monopoly<gh_stars>1-10 from abc import ABC, abstractmethod from UserEntity import Player, Bank, FreeParking from",
"200 elif location > 20: return 150 elif location > 10: return 100",
"effect is applied. Including Chance, Community Chest, Income tax, etc. Attributes: :location: (int)",
"JustVisiting(CornerTile): \"\"\" Models Just Visiting (jail) tile Attributes: ---- From CornerTile Class ----",
"Each of the corner tiles Excluding Free Parking. Attributes: :location: (int) position, (0",
"player if they would like to purchase the property, displays the Name and",
"key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Utility): num_utils_owned += 1 self.owner.exchange_money(player, roll * multiplier[num_utils_owned])",
"== 'jail': return {'jail': Jail()} else: raise TilesClassNotFoundError except TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass Type: \",",
"= \"{0} {1}\".format(self.location, self.name) return output class Go(CornerTile): \"\"\" Models GO Tile Attributes:",
"Railroad(Location): \"\"\" Defines all 4 railroads Attributes: ---- From Location Class ---- :location:",
"String) percent to tax the player \"\"\" self.percent = float(percent) super().__init__(location, name) def",
"to the board \"\"\" while True: try: if data is not None: position",
"location \"\"\" def __init__(self, location=10, name=\"JustVisiting\"): \"\"\" :param location: (int) Location, (0 -",
"to jail!!\") self.active_player.position = 'jail' def house_repairs(self): \"\"\" Charges player house repairs \"\"\"",
"other players \"\"\" # TODO: implement pay all other players print(\"Lucky for {}",
"name: (String) Name of the Tile :param percent: (float or String) percent to",
"self.price * -.75) def format_owner(self): \"\"\" Formats current owner information for __str__() :return:",
":is_mortgaged: (Boolean) mortgage state of the tile \"\"\" @abstractmethod def __init__(self, location, name,",
"Checks if player will pass go if self.active_player.position >= tile_num: self.active_player.money += 200",
"+ \"\\n\" + cls.spaces[key].__str__() return output # construct the default board for testing",
"the location \"\"\" def __init__(self, location=0, name='GO'): \"\"\" :param location: (int) Location, (0",
"name: (Optional, String, default=JustVisiting) Name of the Tile \"\"\" super().__init__(location, name) def landed_on(self,",
"if isinstance(self.owner.owned_properites[key], Utility): num_utils_owned += 1 self.owner.exchange_money(player, roll * multiplier[num_utils_owned]) class Railroad(Location): \"\"\"",
"name, percent): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly",
"{position: SetTax(position, name, data[0])} elif class_type == \"PercentTax\": return {position: PercentTax(position, name, data[0])}",
"\"\"\" Moves player back specified number of spaces and calls that tiles landed_on",
"of the Tile :param amount: (int) amount to tax the player \"\"\" self.amount",
"should be formatted as follows: Square# ClassType class data \"\"\" loop_value = True",
"'' for tier in self.rent: rent_tiers += str(tier) + ', ' owned_by =",
"\"Owner: {0}, Price: {1}, Morgaged: {2}\" \\ .format(self.owner, self.price, self.is_mortgaged) return owned_by def",
"(String) Easy to read tile description \"\"\" rent_tiers = '' for tier in",
"def create_tile(data): \"\"\" Creates a tile based on the data provided :param data:",
"go if self.active_player.position >= tile_num: self.active_player.money += 200 self.active_player.position = tile_num print(\"You've been",
"\" for $\" + str(self.price) + \"? y/n\") return buy_or_pass def mortgage(self): \"\"\"",
"Community Chest Cards Attributes: ---- From Effect Class ---- :location: (int) position, (0",
"data = line.split() new_tile = TileFactory.create_tile(data) cls.spaces.update(new_tile) loop_value = False except FileNotFoundError: if",
"of the location :active_player: (Player Object) Player that the card will be affecting",
"Enter Q to quit\\n\") @classmethod def __str__(cls): \"\"\" :return: (String) Formatted __str__ method",
"as follows [\"Color\", Price, rent, rent_1_house, ..., rent_hotel] \"\"\" self.color = property_data[0] self.rent",
"return Board.spaces[self.active_player.position].landed_on(self.active_player) def gain_money(self, amount): \"\"\" Give player money :param amount: (int) Amount",
"owed_money = 0 for key in Board.spaces: try: if Board.spaces[key].owner == self.active_player: hold",
"out of jail free card \"\"\" print(\"You got a get out of jail",
"Chooses a random random card and calls the appropriate method \"\"\" key =",
"jail \"\"\" def __init__(self, location=30, name='Go To Jail'): super().__init__(location, name) def landed_on(self, player):",
"\"\"\" Gives player a get out of jail free card \"\"\" print(\"You got",
"one house based on the location :param location: (int) location on the board",
"Community Chest, Income tax, etc. Attributes: :location: (int) position, (0 - 39), on",
"def owned_by_bank(self, player): \"\"\" Gives the player the option to purchase the tile,",
"self.gain_money(100) elif key == 8: return self.gain_money(20) elif key == 9: return self.get_money_from_all_other_players(10)",
"Object) Player that is unmortgageing the tile \"\"\" self.is_mortgaged = False self.price =",
"__init__(self, location, name, amount): \"\"\" :param location: (int) Location, (0 - 39) on",
"(1x9 array-like) list with various data formatted as follows [\"Color\", Price, rent, rent_1_house,",
"Gives owner mortgage value (1/2 price), Sets price to 1/2 price, Sets owner",
"= location_to_check if passed_go: self.active_player.money += 200 print(\"You've advanced to the next \",",
"randint(0, 16) if key == 0: return self.advance_to_tile(0) elif key == 1: return",
"self.active_player.position -= num_tiles print(\"You've been sent back \", num_tiles, \"tiles.\", \"\\nYou're now on",
"hotel :cost_per_house: (int) Price of one house \"\"\" def __init__(self, location, name, property_data):",
"player): print(\"Landed on Go!\") class JustVisiting(CornerTile): \"\"\" Models Just Visiting (jail) tile Attributes:",
"Company and Water Works Attributes: ---- From Location Class ---- :location: (int) position,",
"isinstance( Board.spaces[location_to_check], class_type): location_to_check += 1 if location_to_check > 39: location_to_check = location_to_check",
"of a tile and the content is the property \"\"\" spaces = {}",
"all utilities i.e. Electric Company and Water Works Attributes: ---- From Location Class",
"the location \"\"\" @abstractmethod def __init__(self, location, name): \"\"\" :param location: (int) Location,",
"Bank def unmortgage(self, player): \"\"\" Sets is_mortgaged to False, Sets price to full",
"in spaces \"\"\" output = '' for key in cls.spaces: output = output",
"to full price Sets owner to player Charges Player unmortgage price :param player:",
"\"\"\" Parent class for all squares where an effect is applied. Including Chance,",
"False :param player: (Player Object) Player that landed on the tile \"\"\" buy_or_pass",
"elif key == 10: return self.gain_money(100) elif key == 11: return self.lose_money(50) elif",
"def format_owner(self): \"\"\" Formats current owner information for __str__() :return: (String) Easy to",
"Name: \") with open('boards/' + file_name) as file: for line in file: if",
"owned_by = \"Owner: {0}, Price: {1}, Morgaged: {2}\" \\ .format(self.owner, self.price, self.is_mortgaged) return",
"(String) Name of the location ---- New in PercentTax Class ---- :percent: percent",
"from InputValidation import get_yes_or_no_input from random import randint from Exceptions import TilesClassNotFoundError class",
"$\", amount) self.active_player.money += amount def lose_money(self, amount): \"\"\" Takes player's money :param",
"mortgage value (1/2 price), Sets price to 1/2 price, Sets owner to Bank,",
"return self.advance_to_next(Railroad) elif key == 6: return self.gain_money(50) elif key == 7: self.get_out_of_jail_free()",
"if key == 0: return self.advance_to_tile(0) elif key == 1: return self.advance_to_tile(24) elif",
"name) def landed_on(self, player): \"\"\" Takes amount from player and adds it to",
"x in [7, 22, 36]} community_chest = {x: CommunityChest(x, \"Community Chest Card\") for",
"name='GO'): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly board",
"location > 30: return 200 elif location > 20: return 150 elif location",
"price) def owned_by_player(self, player): \"\"\" Charges player rent, transfers rent between owner and",
"Class that sends people to jail \"\"\" def __init__(self, location=30, name='Go To Jail'):",
"board for testing Board.default_board() class TileFactory: \"\"\" Creates all possible different tiles, used",
"key == 8: return self.gain_money(20) elif key == 9: return self.get_money_from_all_other_players(10) elif key",
"is purchased, transfers money, updates owner, and sets is_mortgaged to False :param player:",
"self.owner = player self.owner.exchange_money(self.owner, self.price * -.75) def format_owner(self): \"\"\" Formats current owner",
"(int) Amount of money to give active player \"\"\" print(\"You've gained $\", amount)",
"cost of the tile :owner: (UserEntity Object) Current Owner of the tile :is_mortgaged:",
"return {10: JustVisiting()} elif class_type == 'GoToJail': return {30: GoToJail()} elif class_type ==",
"return self.gain_money(20) elif key == 9: return self.get_money_from_all_other_players(10) elif key == 10: return",
"of the Tile \"\"\" super().__init__(location, name) def draw_card(self): \"\"\" Chooses a random random",
"name, int(property_data[1])) @staticmethod def set_cost_per_house(location): \"\"\" Determines the price for one house based",
"+ \"? y/n\") return buy_or_pass def mortgage(self): \"\"\" Sets is_mortgaged to True, Gives",
"Properties on the board Does not include railroads or utilities Attributes: ---- From",
"player to specified tile and calls that tile's landed_on method :param tile_num: (int)",
"if self.active_player.position >= tile_num: self.active_player.money += 200 self.active_player.position = tile_num print(\"You've been moved",
"FileNotFoundError: if file_name == \"Q\": quit() print(\"File Not found, please try again.\\n\\tOr Enter",
"self.name = name self.price = price self.owner = Bank self.is_mortgaged = False super().__init__()",
"def draw_card(self): pass # -------------Card effects -------------- def advance_to_tile(self, tile_num): \"\"\" Moves player",
"Charges player rent, transfers rent between owner and player :param player: (Player Object)",
"self.gain_money(25) elif key == 14: return self.house_repairs() elif key == 15: return self.gain_money(10)",
"property, displays the Name and price :return: (Boolean) True if the player would",
"else... yet\".format(self.active_player)) def get_money_from_all_other_players(self, amount): \"\"\" Active player gets money from all other",
"line.split() new_tile = TileFactory.create_tile(data) cls.spaces.update(new_tile) loop_value = False except FileNotFoundError: if file_name ==",
":param price: (Optional, int, default=150) purchase cost of the tile \"\"\" super().__init__(location, name,",
"Board.spaces: try: if Board.spaces[key].owner == self.active_player: hold = Board.spaces[key].number_of_houses owed_money += 25 *",
"on the monopoly board :name: (String) Name of the location ---- New in",
"return {position: CommunityChest(position, name)} elif class_type == \"SetTax\": return {position: SetTax(position, name, data[0])}",
"data[4:]] data = [data[3], ] + last_part_data if class_type == \"Property\": return {position:",
"tile \"\"\" num_railroads_owned = 0 cost = {1: 50, 2: 100, 3: 150,",
"\"\"\" if location > 30: return 200 elif location > 20: return 150",
"super().__init__(location, name) def landed_on(self, player): player.position = 'jail' print(\"Go To Jail!!!\") class Jail(CornerTile):",
":return: (String) Easy to read tile description \"\"\" output = \"{0} {1}\".format(self.location, self.name)",
"of houses on the property, 0 - 5 Zero is No houses Five",
"as follows: Square# ClassType class data \"\"\" loop_value = True while loop_value: try:",
"amount: (int) Amount of money to give active player \"\"\" print(\"You've gained $\",",
"self.rent[self.number_of_houses]) def ask_buy_or_pass(self): \"\"\" Asks the player if they would like to purchase",
"\"\"\" Creates a tile based on the data provided :param data: Data read",
"location ---- New In Card Class ---- :active_player: (Player Object) Player that the",
"\"Railroad\": return {position: Railroad(position, name)} elif class_type == \"Chance\": return {position: Chance(position, name)}",
"the Tile \"\"\" super().__init__(location, name) def draw_card(self): \"\"\" Chooses a random random card",
"tile \"\"\" super().__init__(location, name, price) def owned_by_player(self, player): \"\"\" Charges player rent, transfers",
"tile ---- New in Property Class ---- :color: (String) Color of the property",
"13: return self.gain_money(25) elif key == 14: return self.house_repairs() elif key == 15:",
"amount gotten from other players \"\"\" amount = amount * -1 self.pay_all_other_players(amount) def",
":param name: (String) Name of the Tile \"\"\" super().__init__(location, name) def draw_card(self): \"\"\"",
"depending on who landed on the property and who owns the property :param",
"to examples: Railroad, Utility, Card \"\"\" location_to_check = self.active_player.position + 1 passed_go =",
":param price: (int) purchase cost of the tile \"\"\" self.location = location self.name",
"11: return self.lose_money(15) elif key == 12: return self.advance_to_tile(5) elif key == 13:",
"the monopoly board :param name: (String) Name of the Tile :param price: (int)",
"landed_on(self, player): pass class Card(Effect): \"\"\" Parent Class for Chance and Community Chest",
"elif key == 12: return self.lose_money(150) elif key == 13: return self.gain_money(25) elif",
"while loop_value: try: if get_yes_or_no_input('Would You Like To Use The Standard Board?'): file_name",
"= location self.name = name self.price = price self.owner = Bank self.is_mortgaged =",
"board :name: (String) Name of the location ---- New In Card Class ----",
"{1}\" \\ \"\\n\\tTax percent: {2}%\"\\ .format(self.location, self.name, self.percent) class CornerTile(ABC): \"\"\" Parent Class",
"of tiles to be moved back \"\"\" self.active_player.position -= num_tiles print(\"You've been sent",
"board :param name: (String) Name of the Tile :param price: (Optional, int, default=150)",
"self.owner.exchange_money(player, roll * multiplier[num_utils_owned]) class Railroad(Location): \"\"\" Defines all 4 railroads Attributes: ----",
"self.owner.exchange_money(self.owner, self.price * -.75) def format_owner(self): \"\"\" Formats current owner information for __str__()",
"print(\"\\n\\nError!!\\n\\tClass Type: \", data[1], \" Not Found!\") break except IndexError: data = None",
"is a hotel :cost_per_house: (int) Price of one house \"\"\" def __init__(self, location,",
"16: return self.gain_money(100) else: return print(\"Bad Chance Card Draw\") class CommunityChest(Card): \"\"\" All",
":param name: (String) Name of the Tile :param percent: (float or String) percent",
"player): \"\"\" Charges player rent, transfers rent between owner and player :param player:",
"free parking :param player: (Player Object) Player that landed on Percent Tax \"\"\"",
"self.owner.exchange_money(player, cost[num_railroads_owned]) class Effect(ABC): \"\"\" Parent class for all squares where an effect",
"self.owner == Bank: self.owned_by_bank(player) elif self.owner != player: self.owned_by_player(player) def owned_by_bank(self, player): \"\"\"",
"key == 11: return self.lose_money(50) elif key == 12: return self.lose_money(150) elif key",
"@classmethod def default_board(cls): \"\"\" Builds a default board for testing \"\"\" cls.spaces =",
"+ file_name) as file: for line in file: if not line.startswith('#'): data =",
"return {position: Utility(position, name)} elif class_type == \"Railroad\": return {position: Railroad(position, name)} elif",
"and sets is_mortgaged to False :param player: (Player Object) Player that landed on",
"\\ \"\\n\\tTax percent: {2}%\"\\ .format(self.location, self.name, self.percent) class CornerTile(ABC): \"\"\" Parent Class For",
"default board for testing Board.default_board() class TileFactory: \"\"\" Creates all possible different tiles,",
"to purchase the tile, if the tile is purchased, transfers money, updates owner,",
"player would not like to buy \"\"\" buy_or_pass = get_yes_or_no_input( \"Would you like",
"of the Tile :param percent: (float or String) percent to tax the player",
"cls.spaces.update(community_chest) cls.spaces.update(free_parking) @classmethod def read_in_board(cls): \"\"\" read in a board from file. Each",
"\"\"\" Gives the player the option to purchase the tile, if the tile",
"= self.active_player.position + 1 passed_go = False while not isinstance( Board.spaces[location_to_check], class_type): location_to_check",
"Card Class ---- :location: (int) position, (0 - 39), on the monopoly board",
"Location, (0 - 39) on the monopoly board :param name: (String) Name of",
"specified number of spaces and calls that tiles landed_on method. :param num_tiles: (int)",
"__str__(cls): \"\"\" :return: (String) Formatted __str__ method for all objects in spaces \"\"\"",
"-------------- def advance_to_tile(self, tile_num): \"\"\" Moves player to specified tile and calls that",
"== 11: return self.lose_money(15) elif key == 12: return self.advance_to_tile(5) elif key ==",
"location=0, name='GO'): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly",
"description \"\"\" output = \"{0} {1}\".format(self.location, self.name) return output class Chance(Card): \"\"\" All",
"self.percent) class CornerTile(ABC): \"\"\" Parent Class For Each of the corner tiles Excluding",
"the Name and price :return: (Boolean) True if the player would like to",
"board :name: (String) Name of the location :active_player: (Player Object) Player that the",
"i.e. Electric Company and Water Works Attributes: ---- From Location Class ---- :location:",
"class_type == 'GoToJail': return {30: GoToJail()} elif class_type == 'jail': return {'jail': Jail()}",
"if owed_money == 0: print(\"Lucky for you, you have no houses\") else: print(\"You",
"(int) purchase cost of the tile :owner: (UserEntity Object) Current Owner of the",
"owned_by = \"Owner: {0}, Current Rent {1}\" \\ .format(self.owner, self.format_current_rent()) else: owned_by =",
"super().__init__(location, name) def landed_on(self, player): \"\"\" Takes amount from player and adds it",
"monopoly board :param name: (String) Name of the Tile :param amount: (int) amount",
"on the monopoly board :name: (String) Name of the location :active_player: (Player Object)",
"14: return self.house_repairs() elif key == 15: return self.gain_money(10) elif key == 16:",
"return self.get_money_from_all_other_players(50) elif key == 7: return self.gain_money(100) elif key == 8: return",
"is not dependant on the player's wealth Attributes: ---- From Effect Class ----",
"get_yes_or_no_input from random import randint from Exceptions import TilesClassNotFoundError class Location(ABC): \"\"\" Abstract",
".format(self.location, self.name, self.color, owned_by, self.cost_per_house, self.number_of_houses, rent_tiers) return output def format_current_rent(self): \"\"\" Formats",
"as file: for line in file: if not line.startswith('#'): data = line.split() new_tile",
"default_board(cls): \"\"\" Builds a default board for testing \"\"\" cls.spaces = {} streets",
"SetTax(position, name, data[0])} elif class_type == \"PercentTax\": return {position: PercentTax(position, name, data[0])} elif",
"CommunityChest(position, name)} elif class_type == \"SetTax\": return {position: SetTax(position, name, data[0])} elif class_type",
"== \"Go\": return {0: Go()} elif class_type == 'JustVisiting': return {10: JustVisiting()} elif",
"= price self.owner = Bank self.is_mortgaged = False super().__init__() def landed_on(self, player): \"\"\"",
"formatted as follows [\"Color\", Price, rent, rent_1_house, ..., rent_hotel] \"\"\" self.color = property_data[0]",
":amount: Amount to tax the player \"\"\" def __init__(self, location, name, amount): \"\"\"",
"gained $\", amount) self.active_player.money += amount def lose_money(self, amount): \"\"\" Takes player's money",
"is_mortgaged to False, Sets price to full price Sets owner to player Charges",
"Utility(position, name)} elif class_type == \"Railroad\": return {position: Railroad(position, name)} elif class_type ==",
"on Percent Tax \"\"\" Board.spaces[20].exchange_money(player, player.money * self.percent) def __str__(self): \"\"\" :return: (String)",
"list with various data formatted as follows [\"Color\", Price, rent, rent_1_house, ..., rent_hotel]",
"elif key == 8: return self.gain_money(20) elif key == 9: return self.get_money_from_all_other_players(10) elif",
"x in data[3:]] except ValueError: last_part_data = [int(x) for x in data[4:]] data",
"\"\"\" :return: (String) Formatted __str__ method for all objects in spaces \"\"\" output",
"tile's landed_on method :param tile_num: (int) Tile the active player will be moved",
"{x: Property(x, \"Name\", [\"Color\", 150, 5, 10, 20, 40, 80, 160]) for x",
"to tax the player \"\"\" self.amount = int(amount) super().__init__(location, name) def landed_on(self, player):",
"[12, 28]} chances = {x: Chance(x, \"Chance Card\") for x in [7, 22,",
"mortgage state of the tile \"\"\" @abstractmethod def __init__(self, location, name, price): \"\"\"",
"Object) Player that landed on the tile \"\"\" buy_or_pass = self.ask_buy_or_pass() if buy_or_pass:",
"Board.default_board() class TileFactory: \"\"\" Creates all possible different tiles, used with read_in_board in",
"Railroad): num_railroads_owned += 1 self.owner.exchange_money(player, cost[num_railroads_owned]) class Effect(ABC): \"\"\" Parent class for all",
"price for one house based on the location :param location: (int) location on",
"== 10: return self.gain_money(100) elif key == 11: return self.lose_money(50) elif key ==",
"Excluding Free Parking. Attributes: :location: (int) position, (0 - 39), on the monopoly",
"\"\"\" def __init__(self, location=10, name=\"JustVisiting\"): \"\"\" :param location: (int) Location, (0 - 39)",
"30: return 200 elif location > 20: return 150 elif location > 10:",
"Charges Player unmortgage price :param player: (Player Object) Player that is unmortgageing the",
"print(\"Just Visiting Jail\") class GoToJail(CornerTile): \"\"\" Class that sends people to jail \"\"\"",
"= data[2] try: data = [int(x) for x in data[3:]] except ValueError: last_part_data",
"== 5: return self.advance_to_next(Railroad) elif key == 6: return self.gain_money(50) elif key ==",
"in [7, 22, 36]} community_chest = {x: CommunityChest(x, \"Community Chest Card\") for x",
"position, (0 - 39), on the monopoly board :name: (String) Name of the",
"return self.gain_money(10) elif key == 16: return self.gain_money(100) else: print(\"bad CC draw\") class",
"amount from player and adds it to Free Parking :param player: (Player Object)",
"Visiting Jail\") class GoToJail(CornerTile): \"\"\" Class that sends people to jail \"\"\" def",
"location, name, property_data): \"\"\" :param location: (Int) position on the board, int from",
"\"\"\" @abstractmethod def __init__(self, location, name): \"\"\" :param location: (int) Location, (0 -",
"Free Parking :param player: (Player Object) Player that landed on tile \"\"\" Board.spaces[20].exchange_money(player,",
"class Effect(ABC): \"\"\" Parent class for all squares where an effect is applied.",
"elif key == 9: return self.go_to_jail() elif key == 10: return self.house_repairs() elif",
"\"\"\" Sends the player to jail, player does not pass go and does",
"except IndexError: data = None class SetTax(Effect): \"\"\" Charges player a set tax",
"out of jail free card\", \"\\n\\t you now have \", self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards +=",
"elif key == 1: return self.advance_to_tile(24) elif key == 2: return self.advance_to_tile(11) elif",
"return 100 else: return 50 def __str__(self): \"\"\" :return: (String) Easy to read",
"the monopoly board :name: (String) Name of the location ---- New In Card",
"(Player Object) Player that landed on tile \"\"\" num_railroads_owned = 0 cost =",
"player): # TODO: find a way to print out what players are in",
"= property_data[0] self.rent = property_data[2:] self.number_of_houses = 0 self.cost_per_house = self.set_cost_per_house(location) super().__init__(location, name,",
"10: return self.house_repairs() elif key == 11: return self.lose_money(15) elif key == 12:",
"description \"\"\" rent_tiers = '' for tier in self.rent: rent_tiers += str(tier) +",
"Object) Player that landed on tile \"\"\" num_utils_owned = 0 multiplier = {1:",
"\"\"\" self.color = property_data[0] self.rent = property_data[2:] self.number_of_houses = 0 self.cost_per_house = self.set_cost_per_house(location)",
"key == 0: return self.advance_to_tile(0) elif key == 1: return self.advance_to_tile(24) elif key",
"various data formatted as follows [\"Color\", Price, rent, rent_1_house, ..., rent_hotel] \"\"\" self.color",
"amount to pay other players \"\"\" # TODO: implement pay all other players",
"Owner of the tile :is_mortgaged: (Boolean) mortgage state of the tile \"\"\" def",
"random random card and calls the appropriate method \"\"\" key = randint(0, 16)",
"that tile's landed_on method :param tile_num: (int) Tile the active player will be",
"board :param name: (String) Name of the Tile \"\"\" self.location = location self.name",
"Draw\") class CommunityChest(Card): \"\"\" All Community Chest Cards Attributes: ---- From Card Class",
"super().__init__(location, name) def landed_on(self, player): # TODO: find a way to print out",
"and calls that tile's landed_on method :param tile_num: (int) Tile the active player",
"elif key == 3: return self.gain_money(50) elif key == 4: return self.get_out_of_jail_free() elif",
":name: (String) Name of the location ---- New in PercentTax Class ---- :percent:",
"self.advance_to_next(Utility) elif key == 4: return self.advance_to_next(Railroad) elif key == 5: return self.advance_to_next(Railroad)",
"of the tile \"\"\" super().__init__(location, name, price) def owned_by_player(self, player): \"\"\" Charges player",
"Active player pays all other players specified amount :param amount: (int) amount to",
"back \", num_tiles, \"tiles.\", \"\\nYou're now on tile number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on()",
"40, 80, 160]) for x in range(0, 40)} railroads = {x: Railroad(x, \"Name\")",
"int from 0 to 39 :param property_data: (1x9 array-like) list with various data",
"\"\"\" Asks the player if they would like to purchase the property, displays",
"railroads = {x: Railroad(x, \"Name\") for x in [5, 15, 25, 35]} utilities",
"Player that landed on tile \"\"\" self.owner.exchange_money(player, self.rent[self.number_of_houses]) def ask_buy_or_pass(self): \"\"\" Asks the",
"location, name, price): \"\"\" :param location: (int) Location, (0 - 39) on the",
"> 39: location_to_check = location_to_check % 40 passed_go = True self.active_player.position = location_to_check",
"of the location ---- New in SetTax Class ---- :amount: Amount to tax",
"if class_type == \"Property\": return {position: Property(position, name, data)} elif class_type == \"Utility\":",
"__init__(self, location, name, price=150): \"\"\" :param location: (int) Location, (0 - 39) on",
"/ 2 Bank.exchange_money(self.owner, self.price) self.owner = Bank def unmortgage(self, player): \"\"\" Sets is_mortgaged",
"property_data[2:] self.number_of_houses = 0 self.cost_per_house = self.set_cost_per_house(location) super().__init__(location, name, int(property_data[1])) @staticmethod def set_cost_per_house(location):",
"= False super().__init__() def landed_on(self, player): \"\"\" Calls the proper function depending on",
"\"\"\" Parent Class For Each of the corner tiles Excluding Free Parking. Attributes:",
"read in a board from file. Each line in the file should be",
"(String) Name of the location ---- New in SetTax Class ---- :amount: Amount",
"= {x: Railroad(x, \"Name\") for x in [5, 15, 25, 35]} utilities =",
"\" + self.name + \" for $\" + str(self.price) + \"? y/n\") return",
"def gain_money(self, amount): \"\"\" Give player money :param amount: (int) Amount of money",
"200} for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Railroad): num_railroads_owned += 1 self.owner.exchange_money(player, cost[num_railroads_owned])",
"{1: 4, 2: 10} roll = randint(1, 6) for key in self.owner.owned_properites: if",
"to be added to the board \"\"\" while True: try: if data is",
"jail!!\") self.active_player.position = 'jail' def house_repairs(self): \"\"\" Charges player house repairs \"\"\" owed_money",
"@staticmethod def set_cost_per_house(location): \"\"\" Determines the price for one house based on the",
"location \"\"\" @abstractmethod def __init__(self, location, name): \"\"\" :param location: (int) Location, (0",
"follows: Square# ClassType class data \"\"\" loop_value = True while loop_value: try: if",
"(int) Location, (0 - 39) on the monopoly board :param name: (Optional, String,",
"A dictionary where the key is the location of a tile and the",
":return: A tile to be added to the board \"\"\" while True: try:",
"= 0 cost = {1: 50, 2: 100, 3: 150, 4: 200} for",
"= None super().__init__(location, name) def landed_on(self, player): \"\"\" Sets Active player to player,",
"= \"{0} {1} {2}\" \\ \"\\n\\t{3}\" \\ \"\\n\\tCost Per House: {4}, Number Of",
"Takes amount from player and adds it to Free Parking :param player: (Player",
"for x in range(0, 40)} railroads = {x: Railroad(x, \"Name\") for x in",
"the Tile :param amount: (int) amount to tax the player \"\"\" self.amount =",
"sent to jail!!\") self.active_player.position = 'jail' def house_repairs(self): \"\"\" Charges player house repairs",
"+= 200 self.active_player.position = tile_num print(\"You've been moved to :\", Board.spaces[tile_num], \"\\n\\tTile Number:\",",
"tax the player \"\"\" def __init__(self, location, name, percent): \"\"\" :param location: (int)",
"self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards += 1 def go_back(self, num_tiles): \"\"\" Moves player back specified number",
"read tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax Amount: ${2}\"\\ .format(self.location,",
"self.color = property_data[0] self.rent = property_data[2:] self.number_of_houses = 0 self.cost_per_house = self.set_cost_per_house(location) super().__init__(location,",
"for x in data[3:]] except ValueError: last_part_data = [int(x) for x in data[4:]]",
"read tile description \"\"\" rent_tiers = '' for tier in self.rent: rent_tiers +=",
"next \", str(class_type), \"\\n\\tTile Number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on(self.active_player) def gain_money(self, amount): \"\"\"",
"# construct the default board for testing Board.default_board() class TileFactory: \"\"\" Creates all",
"read tile description \"\"\" output = \"{0} {1}\".format(self.location, self.name) return output class Chance(Card):",
"spaces and calls that tiles landed_on method. :param num_tiles: (int) Number of tiles",
"Easy to read owner information \"\"\" if isinstance(self.owner, Bank): owned_by = \"Owner: {0},",
"the player would not like to buy \"\"\" buy_or_pass = get_yes_or_no_input( \"Would you",
"tile :is_mortgaged: (Boolean) mortgage state of the tile \"\"\" @abstractmethod def __init__(self, location,",
"---- From CornerTile Class ---- :location: (int) position, (0 - 39), on the",
"= self.ask_buy_or_pass() if buy_or_pass: # buy player.money = player.money - self.price self.owner =",
"cls.spaces.update(new_tile) loop_value = False except FileNotFoundError: if file_name == \"Q\": quit() print(\"File Not",
"return output class Property(Location): \"\"\" Defines all the Properties on the board Does",
"Class ---- :percent: percent to tax the player \"\"\" def __init__(self, location, name,",
"that landed on the tile \"\"\" if self.owner == Bank: self.owned_by_bank(player) elif self.owner",
"(1/2 price), Sets price to 1/2 price, Sets owner to Bank, \"\"\" self.is_mortgaged",
"\"\"\" Charges player percent of their total wealth and gives it to free",
"unmortgage(self, player): \"\"\" Sets is_mortgaged to False, Sets price to full price Sets",
"key == 14: return self.pay_all_other_players(50) elif key == 15: return self.gain_money(150) elif key",
"purchase the tile, if the tile is purchased, transfers money, updates owner, and",
"to \"\"\" # Checks if player will pass go if self.active_player.position >= tile_num:",
"the monopoly board :param name: (String) Name of the Tile \"\"\" self.location =",
"Abstract Parent Class for all locations on the board Attributes: :location: (int) position,",
"random card and calls the appropriate method \"\"\" key = randint(0, 16) if",
"(String) Name of the Tile :param price: (int) purchase cost of the tile",
"jail free card\", \"\\n\\t you now have \", self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards += 1 def",
"description \"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax Amount: ${2}\"\\ .format(self.location, self.name, self.amount)",
"', ' owned_by = self.format_owner() output = \"{0} {1} {2}\" \\ \"\\n\\t{3}\" \\",
"except AttributeError: # Corner Tiles have no attribute owner, skipped pass print(\"House repairs",
"Q to quit\\n\") @classmethod def __str__(cls): \"\"\" :return: (String) Formatted __str__ method for",
"board :param name: (Optional, String, default=GO) Name of the Tile \"\"\" super().__init__(location, name)",
"passed_go = True self.active_player.position = location_to_check if passed_go: self.active_player.money += 200 print(\"You've advanced",
"streets = {x: Property(x, \"Name\", [\"Color\", 150, 5, 10, 20, 40, 80, 160])",
"location self.name = name self.price = price self.owner = Bank self.is_mortgaged = False",
"== 10: return self.house_repairs() elif key == 11: return self.lose_money(15) elif key ==",
"board :name: (String) Name of the location ---- New in PercentTax Class ----",
"10: return self.gain_money(100) elif key == 11: return self.lose_money(50) elif key == 12:",
"landed_on(self, player): pass def __str__(self): \"\"\" :return: (String) Description of the tile \"\"\"",
"to False, Sets price to full price Sets owner to player Charges Player",
"= [int(x) for x in data[4:]] data = [data[3], ] + last_part_data if",
"get_yes_or_no_input( \"Would you like to buy \" + self.name + \" for $\"",
"200 print(\"You've advanced to the next \", str(class_type), \"\\n\\tTile Number: \", self.active_player.position) return",
"print(\"File Not found, please try again.\\n\\tOr Enter Q to quit\\n\") @classmethod def __str__(cls):",
"the data provided :param data: Data read in from a file :return: A",
"give active player \"\"\" print(\"You've gained $\", amount) self.active_player.money += amount def lose_money(self,",
"Easy to read tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax percent:",
"---- :active_player: (Player Object) Player that the card will be affecting \"\"\" def",
"self.active_player = player return self.draw_card() def draw_card(self): pass # -------------Card effects -------------- def",
"key == 8: return self.go_back(3) elif key == 9: return self.go_to_jail() elif key",
"player's wealth Attributes: ---- From Effect Class ---- :location: (int) position, (0 -",
"\"\"\" self.is_mortgaged = True self.price = self.price / 2 Bank.exchange_money(self.owner, self.price) self.owner =",
"\"\"\" self.active_player = player return self.draw_card() def draw_card(self): pass # -------------Card effects --------------",
"players \"\"\" # TODO: implement pay all other players print(\"Lucky for {} I",
"- 39), on the monopoly board :name: (String) Name of the location :price:",
"state of the tile \"\"\" @abstractmethod def __init__(self, location, name, price): \"\"\" :param",
"is applied. Including Chance, Community Chest, Income tax, etc. Attributes: :location: (int) position,",
"Name of the Tile \"\"\" self.active_player = None super().__init__(location, name) def landed_on(self, player):",
"name @abstractmethod def landed_on(self, player): pass class Card(Effect): \"\"\" Parent Class for Chance",
"purchase cost of the tile \"\"\" super().__init__(location, name, price) def owned_by_player(self, player): \"\"\"",
"else: owned_by = \"Owner: {0}, Price: {1}, Morgaged: {2}\" \\ .format(self.owner, self.price, self.is_mortgaged)",
":param class_type: (Object) class of tile to advance to examples: Railroad, Utility, Card",
"that landed on tile \"\"\" Board.spaces[20].exchange_money(player, self.amount) def __str__(self): \"\"\" :return: (String) Easy",
"return self.house_repairs() elif key == 11: return self.lose_money(15) elif key == 12: return",
"the location ---- New in SetTax Class ---- :amount: Amount to tax the",
"0 - 5 Zero is No houses Five is a hotel :cost_per_house: (int)",
"{0}, Price: {1}, Morgaged: {2}\" \\ .format(self.owner, self.price, self.is_mortgaged) return owned_by def __str__(self):",
"== 'JustVisiting': return {10: JustVisiting()} elif class_type == 'GoToJail': return {30: GoToJail()} elif",
"self.get_out_of_jail_free() elif key == 5: return self.go_to_jail() elif key == 6: return self.get_money_from_all_other_players(50)",
"False self.price = self.price * 2 self.owner = player self.owner.exchange_money(self.owner, self.price * -.75)",
"format_owner(self): \"\"\" Formats current owner information for __str__() :return: (String) Easy to read",
"to :\", Board.spaces[tile_num], \"\\n\\tTile Number:\", tile_num) return Board.spaces[self.active_player.position].landed_on(self.active_player) def advance_to_next(self, class_type): \"\"\" Advances",
"{5}\" \\ \"\\n\\tRent Tiers {6}\"\\ .format(self.location, self.name, self.color, owned_by, self.cost_per_house, self.number_of_houses, rent_tiers) return",
"{0}, Current Rent {1}\" \\ .format(self.owner, self.format_current_rent()) else: owned_by = \"Owner: {0}, Price:",
"\"\"\" output = '' for key in cls.spaces: output = output + \"\\n\"",
"print(\"You've advanced to the next \", str(class_type), \"\\n\\tTile Number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on(self.active_player)",
"tile and calls that tile's landed_on method :param tile_num: (int) Tile the active",
"one house \"\"\" if location > 30: return 200 elif location > 20:",
"class Utility(Location): \"\"\" Defines all utilities i.e. Electric Company and Water Works Attributes:",
"try: if Board.spaces[key].owner == self.active_player: hold = Board.spaces[key].number_of_houses owed_money += 25 * hold",
"Name of the location :price: (int) purchase cost of the tile :owner: (UserEntity",
"y/n\") return buy_or_pass def mortgage(self): \"\"\" Sets is_mortgaged to True, Gives owner mortgage",
"Jail!!!\") class Jail(CornerTile): def __init__(self, location='jail', name='jail'): super().__init__(location, name) def landed_on(self, player): pass",
"or utilities Attributes: ---- From Location Class ---- :location: (int) position, (0 -",
"paid: $\", owed_money) Board.spaces[20].exchange_money(self.active_player, owed_money) def pay_all_other_players(self, amount): \"\"\" Active player pays all",
"Class ---- :color: (String) Color of the property :rent: (1x6 array-like) Rent tiers",
"in Board \"\"\" @staticmethod def create_tile(data): \"\"\" Creates a tile based on the",
"return {'jail': Jail()} else: raise TilesClassNotFoundError except TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass Type: \", data[1], \"",
"Standard Board?'): file_name = 'StandardBoard' else: file_name = input(\"Please enter the file Name:",
"self.price = self.price * 2 self.owner = player self.owner.exchange_money(self.owner, self.price * -.75) def",
"Amount of money to give active player \"\"\" print(\"You've gained $\", amount) self.active_player.money",
"__str__ method for all objects in spaces \"\"\" output = '' for key",
"print(\"bad CC draw\") class Board(object): \"\"\" The Monopoly Board Attributes: :spaces: (Dict) A",
"class of tile to advance to examples: Railroad, Utility, Card \"\"\" location_to_check =",
"Sends the player to jail, player does not pass go and does not",
"self.owner != player: self.owned_by_player(player) def owned_by_bank(self, player): \"\"\" Gives the player the option",
"def advance_to_tile(self, tile_num): \"\"\" Moves player to specified tile and calls that tile's",
"== 7: self.get_out_of_jail_free() elif key == 8: return self.go_back(3) elif key == 9:",
"\"\"\" read in a board from file. Each line in the file should",
"self.active_player.position + 1 passed_go = False while not isinstance( Board.spaces[location_to_check], class_type): location_to_check +=",
"x in [12, 28]} chances = {x: Chance(x, \"Chance Card\") for x in",
"Sets price to full price Sets owner to player Charges Player unmortgage price",
"name, price) def owned_by_player(self, player): \"\"\" Charges player rent, transfers rent between owner",
"\"\"\" Moves player to specified tile and calls that tile's landed_on method :param",
"Determines the price for one house based on the location :param location: (int)",
"def __init__(self, location, name): \"\"\" :param location: (int) Location, (0 - 39) on",
"hold = Board.spaces[key].number_of_houses owed_money += 25 * hold except AttributeError: # Corner Tiles",
"and player :param player: (Player Object) Player that landed on tile \"\"\" num_railroads_owned",
"on the monopoly board :param name: (String) Name of the Tile :param percent:",
"IndexError: data = None class SetTax(Effect): \"\"\" Charges player a set tax amount,",
"be formatted as follows: Square# ClassType class data \"\"\" loop_value = True while",
"TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass Type: \", data[1], \" Not Found!\") break except IndexError: data =",
"- self.price self.owner = player player.owned_properites.update({self.location: self}) self.is_mortgaged = False self.price = self.price",
"of the tile :is_mortgaged: (Boolean) mortgage state of the tile ---- New in",
"to 39 :param property_data: (1x9 array-like) list with various data formatted as follows",
"20, 40, 80, 160]) for x in range(0, 40)} railroads = {x: Railroad(x,",
"{6}\"\\ .format(self.location, self.name, self.color, owned_by, self.cost_per_house, self.number_of_houses, rent_tiers) return output def format_current_rent(self): \"\"\"",
"(UserEntity Object) Current Owner of the tile :is_mortgaged: (Boolean) mortgage state of the",
"class_type): location_to_check += 1 if location_to_check > 39: location_to_check = location_to_check % 40",
"data[3:]] except ValueError: last_part_data = [int(x) for x in data[4:]] data = [data[3],",
"location ---- New in SetTax Class ---- :amount: Amount to tax the player",
"include railroads or utilities Attributes: ---- From Location Class ---- :location: (int) position,",
"39) on the monopoly board :param name: (Optional, String, default=GO) Name of the",
"\"Q\": quit() print(\"File Not found, please try again.\\n\\tOr Enter Q to quit\\n\") @classmethod",
"== \"Chance\": return {position: Chance(position, name)} elif class_type == \"CommunityChest\": return {position: CommunityChest(position,",
"transfers money, updates owner, and sets is_mortgaged to False :param player: (Player Object)",
"advanced to the next \", str(class_type), \"\\n\\tTile Number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on(self.active_player) def",
"self.advance_to_tile(39) elif key == 14: return self.pay_all_other_players(50) elif key == 15: return self.gain_money(150)",
"== 12: return self.lose_money(150) elif key == 13: return self.gain_money(25) elif key ==",
"4: 200} for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Railroad): num_railroads_owned += 1 self.owner.exchange_money(player,",
"player player.owned_properites.update({self.location: self}) self.is_mortgaged = False self.price = self.price * 2 def owned_by_player(self,",
"self.go_back(3) elif key == 9: return self.go_to_jail() elif key == 10: return self.house_repairs()",
"free_parking = {20: FreeParking()} cls.spaces.update(streets) cls.spaces.update(railroads) cls.spaces.update(utilities) cls.spaces.update(chances) cls.spaces.update(community_chest) cls.spaces.update(free_parking) @classmethod def read_in_board(cls):",
"return output # construct the default board for testing Board.default_board() class TileFactory: \"\"\"",
"\"\"\" Board.spaces[20].exchange_money(player, self.amount) def __str__(self): \"\"\" :return: (String) Easy to read tile description",
"line.startswith('#'): data = line.split() new_tile = TileFactory.create_tile(data) cls.spaces.update(new_tile) loop_value = False except FileNotFoundError:",
"player's money :param amount: (int) amount of money to take from active player",
"get out of jail free card\", \"\\n\\t you now have \", self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards",
"return self.advance_to_tile(11) elif key == 3: return self.advance_to_next(Utility) elif key == 4: return",
"ABC, abstractmethod from UserEntity import Player, Bank, FreeParking from InputValidation import get_yes_or_no_input from",
"to player, then calls draw_card() :param player: (Player Object) Player that landed on",
"different tiles, used with read_in_board in Board \"\"\" @staticmethod def create_tile(data): \"\"\" Creates",
"advance_to_next(self, class_type): \"\"\" Advances active player to the next tile of specified class",
"(String) Name of the Tile :param price: (Optional, int, default=150) purchase cost of",
"then calls draw_card() :param player: (Player Object) Player that landed on card tile",
"houses on the property, 0 - 5 Zero is No houses Five is",
"(int) location on the board :return: (int) cost for one house \"\"\" if",
"1 passed_go = False while not isinstance( Board.spaces[location_to_check], class_type): location_to_check += 1 if",
"== 2: return self.advance_to_tile(11) elif key == 3: return self.advance_to_next(Utility) elif key ==",
"Board.spaces[20].exchange_money(player, player.money * self.percent) def __str__(self): \"\"\" :return: (String) Easy to read tile",
"return Board.spaces[self.active_player.position].landed_on() def go_to_jail(self): \"\"\" Sends the player to jail, player does not",
"of their total wealth and gives it to free parking :param player: (Player",
"self.name) return output class Chance(Card): \"\"\" All Chance Cards Attributes: ---- From Card",
"150, 4: 200} for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Railroad): num_railroads_owned += 1",
"def advance_to_next(self, class_type): \"\"\" Advances active player to the next tile of specified",
"set_cost_per_house(location): \"\"\" Determines the price for one house based on the location :param",
"+= 200 print(\"You've advanced to the next \", str(class_type), \"\\n\\tTile Number: \", self.active_player.position)",
"{x: Railroad(x, \"Name\") for x in [5, 15, 25, 35]} utilities = {x:",
"name, price=200): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly",
"4, 2: 10} roll = randint(1, 6) for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key],",
"@classmethod def __str__(cls): \"\"\" :return: (String) Formatted __str__ method for all objects in",
"The Monopoly Board Attributes: :spaces: (Dict) A dictionary where the key is the",
"-.75) def format_owner(self): \"\"\" Formats current owner information for __str__() :return: (String) Easy",
"location, name, price=150): \"\"\" :param location: (int) Location, (0 - 39) on the",
"Moves player to specified tile and calls that tile's landed_on method :param tile_num:",
"== 3: return self.gain_money(50) elif key == 4: return self.get_out_of_jail_free() elif key ==",
"Tile \"\"\" super().__init__(location, name) def landed_on(self, player): # TODO: find a way to",
"Chest Cards Attributes: ---- From Card Class ---- :location: (int) position, (0 -",
"self.name = name @abstractmethod def landed_on(self, player): pass class Card(Effect): \"\"\" Parent Class",
"{2}\" \\ \"\\n\\t{3}\" \\ \"\\n\\tCost Per House: {4}, Number Of Houses: {5}\" \\",
"from file. Each line in the file should be formatted as follows: Square#",
"(String) Description of the tile \"\"\" output = \"{0} {1}\".format(self.location, self.name) return output",
"output = '' for key in cls.spaces: output = output + \"\\n\" +",
"player return self.draw_card() def draw_card(self): pass # -------------Card effects -------------- def advance_to_tile(self, tile_num):",
"calls draw_card() :param player: (Player Object) Player that landed on card tile :return:",
"(String) Name of the location :price: (int) purchase cost of the tile :owner:",
"\"\"\" Calls the proper function depending on who landed on the property and",
"attribute owner, skipped pass print(\"House repairs are expensive!\") if owed_money == 0: print(\"Lucky",
"get_money_from_all_other_players(self, amount): \"\"\" Active player gets money from all other players :param amount:",
"passed_go = False while not isinstance( Board.spaces[location_to_check], class_type): location_to_check += 1 if location_to_check",
"Class ---- :active_player: (Player Object) Player that the card will be affecting \"\"\"",
"jail, player does not pass go and does not collect $200 \"\"\" print(\"Oh",
"self.active_player.money += amount def lose_money(self, amount): \"\"\" Takes player's money :param amount: (int)",
":param amount: (int) amount gotten from other players \"\"\" amount = amount *",
"key == 5: return self.advance_to_next(Railroad) elif key == 6: return self.gain_money(50) elif key",
"self.gain_money(50) elif key == 7: self.get_out_of_jail_free() elif key == 8: return self.go_back(3) elif",
"25, 35]} utilities = {x: Utility(x, \"Name\") for x in [12, 28]} chances",
"elif class_type == \"Railroad\": return {position: Railroad(position, name)} elif class_type == \"Chance\": return",
"board Attributes: :location: (int) position, (0 - 39), on the monopoly board :name:",
"def house_repairs(self): \"\"\" Charges player house repairs \"\"\" owed_money = 0 for key",
"key == 9: return self.get_money_from_all_other_players(10) elif key == 10: return self.gain_money(100) elif key",
"28]} chances = {x: Chance(x, \"Chance Card\") for x in [7, 22, 36]}",
"New in Property Class ---- :color: (String) Color of the property :rent: (1x6",
"super().__init__(location, name) def landed_on(self, player): \"\"\" Charges player percent of their total wealth",
"From Effect Class ---- :location: (int) position, (0 - 39), on the monopoly",
"objects in spaces \"\"\" output = '' for key in cls.spaces: output =",
"self.price * 2 def owned_by_player(self, player): \"\"\" Charges player rent, transfers rent between",
"(String) Easy to read tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax",
"monopoly board :name: (String) Name of the location ---- New in PercentTax Class",
"format_current_rent(self): \"\"\" Formats Current rent for __str__ :return: (String) Current Rent \"\"\" return",
"False self.price = self.price * 2 def owned_by_player(self, player): \"\"\" Charges player rent,",
"\"\"\" print(\"You've gained $\", amount) self.active_player.money += amount def lose_money(self, amount): \"\"\" Takes",
"\"CommunityChest\": return {position: CommunityChest(position, name)} elif class_type == \"SetTax\": return {position: SetTax(position, name,",
"(int) purchase cost of the tile \"\"\" self.location = location self.name = name",
"the board :return: (int) cost for one house \"\"\" if location > 30:",
"output def format_current_rent(self): \"\"\" Formats Current rent for __str__ :return: (String) Current Rent",
"name: (String) Name of the Tile \"\"\" self.location = location self.name = name",
"default=200) purchase cost of the tile \"\"\" super().__init__(location, name, price) def owned_by_player(self, player):",
"location of a tile and the content is the property \"\"\" spaces =",
"True if the player would like to buy False if the player would",
"tile :return: calls draw_card \"\"\" self.active_player = player return self.draw_card() def draw_card(self): pass",
":param percent: (float or String) percent to tax the player \"\"\" self.percent =",
"Corner Tiles have no attribute owner, skipped pass print(\"House repairs are expensive!\") if",
"output # construct the default board for testing Board.default_board() class TileFactory: \"\"\" Creates",
"\"\"\" # Checks if player will pass go if self.active_player.position >= tile_num: self.active_player.money",
"affecting \"\"\" def __init__(self, location, name): \"\"\" :param location: (int) Location, (0 -",
"self.price) self.owner = Bank def unmortgage(self, player): \"\"\" Sets is_mortgaged to False, Sets",
"Chance Card Draw\") class CommunityChest(Card): \"\"\" All Community Chest Cards Attributes: ---- From",
"property :rent: (1x6 array-like) Rent tiers for the property :number_of_houses: (int) Number of",
"mortgage state of the tile ---- New in Property Class ---- :color: (String)",
"self.format_owner()) return output class Property(Location): \"\"\" Defines all the Properties on the board",
"(String) Easy to read tile description \"\"\" output = \"{0} {1}\".format(self.location, self.name) return",
":param amount: (int) amount of money to take from active player \"\"\" print(\"You've",
"the default board for testing Board.default_board() class TileFactory: \"\"\" Creates all possible different",
"state of the tile \"\"\" def __init__(self, location, name, price=150): \"\"\" :param location:",
"elif key == 11: return self.lose_money(15) elif key == 12: return self.advance_to_tile(5) elif",
"owed_money) def pay_all_other_players(self, amount): \"\"\" Active player pays all other players specified amount",
"please try again.\\n\\tOr Enter Q to quit\\n\") @classmethod def __str__(cls): \"\"\" :return: (String)",
"on the board Does not include railroads or utilities Attributes: ---- From Location",
"\"{0} {1}\".format(self.location, self.name) return output class Go(CornerTile): \"\"\" Models GO Tile Attributes: ----",
"-1 self.pay_all_other_players(amount) def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\"",
"You Like To Use The Standard Board?'): file_name = 'StandardBoard' else: file_name =",
"player, then calls draw_card() :param player: (Player Object) Player that landed on card",
"of money to take from active player \"\"\" print(\"You've lost $\", amount) Board.spaces[20].exchange_money(self.active_player,",
"= Board.spaces[key].number_of_houses owed_money += 25 * hold except AttributeError: # Corner Tiles have",
"player \"\"\" def __init__(self, location, name, amount): \"\"\" :param location: (int) Location, (0",
"= self.price / 2 Bank.exchange_money(self.owner, self.price) self.owner = Bank def unmortgage(self, player): \"\"\"",
"in [12, 28]} chances = {x: Chance(x, \"Chance Card\") for x in [7,",
"self.go_to_jail() elif key == 6: return self.get_money_from_all_other_players(50) elif key == 7: return self.gain_money(100)",
"class CornerTile(ABC): \"\"\" Parent Class For Each of the corner tiles Excluding Free",
"if the player would not like to buy \"\"\" buy_or_pass = get_yes_or_no_input( \"Would",
"it to free parking :param player: (Player Object) Player that landed on Percent",
"def __init__(self, location='jail', name='jail'): super().__init__(location, name) def landed_on(self, player): pass def __str__(self): return",
"(Boolean) mortgage state of the tile \"\"\" def __init__(self, location, name, price=150): \"\"\"",
"utilities Attributes: ---- From Location Class ---- :location: (int) position, (0 - 39),",
"self.active_player.position) return Board.spaces[self.active_player.position].landed_on(self.active_player) def gain_money(self, amount): \"\"\" Give player money :param amount: (int)",
"elif key == 12: return self.advance_to_tile(5) elif key == 13: return self.advance_to_tile(39) elif",
"\\ \"\\n\\t{3}\" \\ \"\\n\\tCost Per House: {4}, Number Of Houses: {5}\" \\ \"\\n\\tRent",
"take from active player \"\"\" print(\"You've lost $\", amount) Board.spaces[20].exchange_money(self.active_player, amount) def get_out_of_jail_free(self):",
"board for testing \"\"\" cls.spaces = {} streets = {x: Property(x, \"Name\", [\"Color\",",
"location ---- New in PercentTax Class ---- :percent: percent to tax the player",
"name: (String) Name of the Tile :param amount: (int) amount to tax the",
"Chance, Community Chest, Income tax, etc. Attributes: :location: (int) position, (0 - 39),",
"print(\"You got a get out of jail free card\", \"\\n\\t you now have",
"property and who owns the property :param player: (Player Object) The player that",
":param location: (Int) position on the board, int from 0 to 39 :param",
"data provided :param data: Data read in from a file :return: A tile",
"class_type == \"SetTax\": return {position: SetTax(position, name, data[0])} elif class_type == \"PercentTax\": return",
"get out of jail free card \"\"\" print(\"You got a get out of",
"formatted as follows: Square# ClassType class data \"\"\" loop_value = True while loop_value:",
"\\ .format(self.owner, self.price, self.is_mortgaged) return owned_by def __str__(self): \"\"\" :return: (String) Easy to",
"Object) Player that the card will be affecting \"\"\" def __init__(self, location, name):",
"\") with open('boards/' + file_name) as file: for line in file: if not",
"{position: PercentTax(position, name, data[0])} elif class_type == \"FreeParking\": return {position: FreeParking(position)} elif class_type",
"return buy_or_pass def mortgage(self): \"\"\" Sets is_mortgaged to True, Gives owner mortgage value",
"owner information for __str__() :return: (String) Easy to read owner information \"\"\" if",
"= self.price * 2 self.owner = player self.owner.exchange_money(self.owner, self.price * -.75) def format_owner(self):",
"elif location > 10: return 100 else: return 50 def __str__(self): \"\"\" :return:",
"key in Board.spaces: try: if Board.spaces[key].owner == self.active_player: hold = Board.spaces[key].number_of_houses owed_money +=",
"abc import ABC, abstractmethod from UserEntity import Player, Bank, FreeParking from InputValidation import",
"output = \"{0} {1} {2}\" \\ \"\\n\\t{3}\" \\ \"\\n\\tCost Per House: {4}, Number",
"(String) Name of the location \"\"\" @abstractmethod def __init__(self, location, name): \"\"\" :param",
"\"Name\", [\"Color\", 150, 5, 10, 20, 40, 80, 160]) for x in range(0,",
"read tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\t{2}\".format(self.location, self.name, self.format_owner()) return",
"information \"\"\" if isinstance(self.owner, Bank): owned_by = \"Owner: {0}, Current Rent {1}\" \\",
"\"\"\" self.is_mortgaged = False self.price = self.price * 2 self.owner = player self.owner.exchange_money(self.owner,",
"percent: (float or String) percent to tax the player \"\"\" self.percent = float(percent)",
"elif key == 6: return self.gain_money(50) elif key == 7: self.get_out_of_jail_free() elif key",
"Player that landed on the tile \"\"\" buy_or_pass = self.ask_buy_or_pass() if buy_or_pass: #",
"80, 160]) for x in range(0, 40)} railroads = {x: Railroad(x, \"Name\") for",
"\"\\nYou're now on tile number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on() def go_to_jail(self): \"\"\" Sends",
"the player \"\"\" def __init__(self, location, name, amount): \"\"\" :param location: (int) Location,",
"the monopoly board :name: (String) Name of the location \"\"\" def __init__(self, location=0,",
"def landed_on(self, player): print(\"Landed on Go!\") class JustVisiting(CornerTile): \"\"\" Models Just Visiting (jail)",
"multiplier[num_utils_owned]) class Railroad(Location): \"\"\" Defines all 4 railroads Attributes: ---- From Location Class",
"\"\\n\\tRent Tiers {6}\"\\ .format(self.location, self.name, self.color, owned_by, self.cost_per_house, self.number_of_houses, rent_tiers) return output def",
"= True while loop_value: try: if get_yes_or_no_input('Would You Like To Use The Standard",
".format(self.location, self.name, self.amount) return output class PercentTax(Effect): \"\"\" Charges player a set tax",
"active player \"\"\" print(\"You've gained $\", amount) self.active_player.money += amount def lose_money(self, amount):",
":param player: (Player Object) Player that landed on tile \"\"\" num_utils_owned = 0",
"__init__(self, location, name, percent): \"\"\" :param location: (int) Location, (0 - 39) on",
"player.money - self.price self.owner = player player.owned_properites.update({self.location: self}) self.is_mortgaged = False self.price =",
"on the monopoly board :param name: (String) Name of the Tile \"\"\" self.active_player",
"jail free card \"\"\" print(\"You got a get out of jail free card\",",
"amount: (int) amount gotten from other players \"\"\" amount = amount * -1",
"cls.spaces: output = output + \"\\n\" + cls.spaces[key].__str__() return output # construct the",
"player back specified number of spaces and calls that tiles landed_on method. :param",
"location, name, amount): \"\"\" :param location: (int) Location, (0 - 39) on the",
"self.gain_money(200) elif key == 2: return self.lose_money(50) elif key == 3: return self.gain_money(50)",
"10} roll = randint(1, 6) for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Utility): num_utils_owned",
"loop_value: try: if get_yes_or_no_input('Would You Like To Use The Standard Board?'): file_name =",
"Sets Active player to player, then calls draw_card() :param player: (Player Object) Player",
"GO Tile Attributes: ---- From CornerTile Class ---- :location: (int) position, (0 -",
"Go()} elif class_type == 'JustVisiting': return {10: JustVisiting()} elif class_type == 'GoToJail': return",
"The player that landed on the tile \"\"\" if self.owner == Bank: self.owned_by_bank(player)",
"Creates all possible different tiles, used with read_in_board in Board \"\"\" @staticmethod def",
"\"\"\" Defines all 4 railroads Attributes: ---- From Location Class ---- :location: (int)",
"player): \"\"\" Takes amount from player and adds it to Free Parking :param",
"return output class Go(CornerTile): \"\"\" Models GO Tile Attributes: ---- From CornerTile Class",
"tile Attributes: ---- From CornerTile Class ---- :location: (int) position, (0 - 39),",
"return self.go_to_jail() elif key == 10: return self.house_repairs() elif key == 11: return",
"on who landed on the property and who owns the property :param player:",
"house \"\"\" def __init__(self, location, name, property_data): \"\"\" :param location: (Int) position on",
"Name of the location \"\"\" @abstractmethod def __init__(self, location, name): \"\"\" :param location:",
"in cls.spaces: output = output + \"\\n\" + cls.spaces[key].__str__() return output # construct",
"Tile \"\"\" self.active_player = None super().__init__(location, name) def landed_on(self, player): \"\"\" Sets Active",
"all other players specified amount :param amount: (int) amount to pay other players",
"to tax the player \"\"\" self.percent = float(percent) super().__init__(location, name) def landed_on(self, player):",
"class Go(CornerTile): \"\"\" Models GO Tile Attributes: ---- From CornerTile Class ---- :location:",
"player.money = player.money - self.price self.owner = player player.owned_properites.update({self.location: self}) self.is_mortgaged = False",
"for key in Board.spaces: try: if Board.spaces[key].owner == self.active_player: hold = Board.spaces[key].number_of_houses owed_money",
"not line.startswith('#'): data = line.split() new_tile = TileFactory.create_tile(data) cls.spaces.update(new_tile) loop_value = False except",
"tile to be added to the board \"\"\" while True: try: if data",
"except ValueError: last_part_data = [int(x) for x in data[4:]] data = [data[3], ]",
"quit() print(\"File Not found, please try again.\\n\\tOr Enter Q to quit\\n\") @classmethod def",
"utilities = {x: Utility(x, \"Name\") for x in [12, 28]} chances = {x:",
"data[2] try: data = [int(x) for x in data[3:]] except ValueError: last_part_data =",
".format(self.location, self.name, self.percent) class CornerTile(ABC): \"\"\" Parent Class For Each of the corner",
"Found!\") break except IndexError: data = None class SetTax(Effect): \"\"\" Charges player a",
"tile \"\"\" Board.spaces[20].exchange_money(player, self.amount) def __str__(self): \"\"\" :return: (String) Easy to read tile",
"rent for __str__ :return: (String) Current Rent \"\"\" return str(self.rent[self.number_of_houses]) class Utility(Location): \"\"\"",
"advance_to_tile(self, tile_num): \"\"\" Moves player to specified tile and calls that tile's landed_on",
"of the location :price: (int) purchase cost of the tile :owner: (UserEntity Object)",
"{position: Property(position, name, data)} elif class_type == \"Utility\": return {position: Utility(position, name)} elif",
"int(property_data[1])) @staticmethod def set_cost_per_house(location): \"\"\" Determines the price for one house based on",
"(int) amount gotten from other players \"\"\" amount = amount * -1 self.pay_all_other_players(amount)",
"def __init__(self, location, name, price): \"\"\" :param location: (int) Location, (0 - 39)",
"player: (Player Object) Player that landed on the tile \"\"\" buy_or_pass = self.ask_buy_or_pass()",
"self.is_mortgaged = True self.price = self.price / 2 Bank.exchange_money(self.owner, self.price) self.owner = Bank",
"player money :param amount: (int) Amount of money to give active player \"\"\"",
"player: self.owned_by_player(player) def owned_by_bank(self, player): \"\"\" Gives the player the option to purchase",
"== \"PercentTax\": return {position: PercentTax(position, name, data[0])} elif class_type == \"FreeParking\": return {position:",
"== \"CommunityChest\": return {position: CommunityChest(position, name)} elif class_type == \"SetTax\": return {position: SetTax(position,",
"the tile \"\"\" @abstractmethod def __init__(self, location, name, price): \"\"\" :param location: (int)",
":return: (String) Easy to read tile description \"\"\" output = \"{0} {1}\" \\",
"x in range(0, 40)} railroads = {x: Railroad(x, \"Name\") for x in [5,",
"'jail' print(\"Go To Jail!!!\") class Jail(CornerTile): def __init__(self, location='jail', name='jail'): super().__init__(location, name) def",
"{position: CommunityChest(position, name)} elif class_type == \"SetTax\": return {position: SetTax(position, name, data[0])} elif",
"(0 - 39) on the monopoly board :param name: (Optional, String, default=JustVisiting) Name",
"${2}\"\\ .format(self.location, self.name, self.amount) return output class PercentTax(Effect): \"\"\" Charges player a set",
"owner and player :param player: (Player Object) Player that landed on tile \"\"\"",
"Number:\", tile_num) return Board.spaces[self.active_player.position].landed_on(self.active_player) def advance_to_next(self, class_type): \"\"\" Advances active player to the",
"* hold except AttributeError: # Corner Tiles have no attribute owner, skipped pass",
"\"Name\") for x in [5, 15, 25, 35]} utilities = {x: Utility(x, \"Name\")",
"+= amount def lose_money(self, amount): \"\"\" Takes player's money :param amount: (int) amount",
"name)} elif class_type == \"SetTax\": return {position: SetTax(position, name, data[0])} elif class_type ==",
"def format_current_rent(self): \"\"\" Formats Current rent for __str__ :return: (String) Current Rent \"\"\"",
"dependant on the player's wealth Attributes: ---- From Effect Class ---- :location: (int)",
":return: (Boolean) True if the player would like to buy False if the",
"name, price=150): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly",
"+ ', ' owned_by = self.format_owner() output = \"{0} {1} {2}\" \\ \"\\n\\t{3}\"",
"Like To Use The Standard Board?'): file_name = 'StandardBoard' else: file_name = input(\"Please",
"1 self.owner.exchange_money(player, cost[num_railroads_owned]) class Effect(ABC): \"\"\" Parent class for all squares where an",
"monopoly board :param name: (Optional, String, default=GO) Name of the Tile \"\"\" super().__init__(location,",
"owned_by, self.cost_per_house, self.number_of_houses, rent_tiers) return output def format_current_rent(self): \"\"\" Formats Current rent for",
"Name of the location :active_player: (Player Object) Player that the card will be",
"Tile :param price: (int) purchase cost of the tile \"\"\" self.location = location",
"Tile :param price: (Optional, int, default=200) purchase cost of the tile \"\"\" super().__init__(location,",
"data[0])} elif class_type == \"PercentTax\": return {position: PercentTax(position, name, data[0])} elif class_type ==",
"[int(x) for x in data[4:]] data = [data[3], ] + last_part_data if class_type",
"5: return self.advance_to_next(Railroad) elif key == 6: return self.gain_money(50) elif key == 7:",
"TODO: find a way to print out what players are in jail print(\"Just",
"for Chance and Community Chest Cards Attributes: ---- From Effect Class ---- :location:",
"# TODO: implement pay all other players print(\"Lucky for {} I don't know",
"key is the location of a tile and the content is the property",
"return self.go_to_jail() elif key == 6: return self.get_money_from_all_other_players(50) elif key == 7: return",
"number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on() def go_to_jail(self): \"\"\" Sends the player to jail,",
"pay_all_other_players(self, amount): \"\"\" Active player pays all other players specified amount :param amount:",
"self.name, self.percent) class CornerTile(ABC): \"\"\" Parent Class For Each of the corner tiles",
"* -.75) def format_owner(self): \"\"\" Formats current owner information for __str__() :return: (String)",
"Object) Player that landed on Percent Tax \"\"\" Board.spaces[20].exchange_money(player, player.money * self.percent) def",
"of the tile \"\"\" def __init__(self, location, name, price=150): \"\"\" :param location: (int)",
"isinstance(self.owner.owned_properites[key], Utility): num_utils_owned += 1 self.owner.exchange_money(player, roll * multiplier[num_utils_owned]) class Railroad(Location): \"\"\" Defines",
"{position: Chance(position, name)} elif class_type == \"CommunityChest\": return {position: CommunityChest(position, name)} elif class_type",
"'JustVisiting': return {10: JustVisiting()} elif class_type == 'GoToJail': return {30: GoToJail()} elif class_type",
":param location: (int) location on the board :return: (int) cost for one house",
"[5, 15, 25, 35]} utilities = {x: Utility(x, \"Name\") for x in [12,",
"ValueError: last_part_data = [int(x) for x in data[4:]] data = [data[3], ] +",
"randint from Exceptions import TilesClassNotFoundError class Location(ABC): \"\"\" Abstract Parent Class for all",
"a board from file. Each line in the file should be formatted as",
"str(self.rent[self.number_of_houses]) class Utility(Location): \"\"\" Defines all utilities i.e. Electric Company and Water Works",
"Easy to read tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\t{2}\".format(self.location, self.name,",
"in [2, 17, 33]} free_parking = {20: FreeParking()} cls.spaces.update(streets) cls.spaces.update(railroads) cls.spaces.update(utilities) cls.spaces.update(chances) cls.spaces.update(community_chest)",
"to Bank, \"\"\" self.is_mortgaged = True self.price = self.price / 2 Bank.exchange_money(self.owner, self.price)",
"* 2 self.owner = player self.owner.exchange_money(self.owner, self.price * -.75) def format_owner(self): \"\"\" Formats",
"key = randint(0, 16) if key == 0: return self.advance_to_tile(0) elif key ==",
"input(\"Please enter the file Name: \") with open('boards/' + file_name) as file: for",
"self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Railroad): num_railroads_owned += 1 self.owner.exchange_money(player, cost[num_railroads_owned]) class Effect(ABC): \"\"\" Parent",
"player Charges Player unmortgage price :param player: (Player Object) Player that is unmortgageing",
"if passed_go: self.active_player.money += 200 print(\"You've advanced to the next \", str(class_type), \"\\n\\tTile",
"return self.gain_money(100) else: return print(\"Bad Chance Card Draw\") class CommunityChest(Card): \"\"\" All Community",
"16: return self.gain_money(100) else: print(\"bad CC draw\") class Board(object): \"\"\" The Monopoly Board",
"that sends people to jail \"\"\" def __init__(self, location=30, name='Go To Jail'): super().__init__(location,",
"* -1 self.pay_all_other_players(amount) def __str__(self): \"\"\" :return: (String) Easy to read tile description",
"10: return 100 else: return 50 def __str__(self): \"\"\" :return: (String) Easy to",
"= self.format_owner() output = \"{0} {1} {2}\" \\ \"\\n\\t{3}\" \\ \"\\n\\tCost Per House:",
":return: (String) Current Rent \"\"\" return str(self.rent[self.number_of_houses]) class Utility(Location): \"\"\" Defines all utilities",
"to be moved back \"\"\" self.active_player.position -= num_tiles print(\"You've been sent back \",",
"# buy player.money = player.money - self.price self.owner = player player.owned_properites.update({self.location: self}) self.is_mortgaged",
"self.cost_per_house = self.set_cost_per_house(location) super().__init__(location, name, int(property_data[1])) @staticmethod def set_cost_per_house(location): \"\"\" Determines the price",
"Electric Company and Water Works Attributes: ---- From Location Class ---- :location: (int)",
"tile \"\"\" self.is_mortgaged = False self.price = self.price * 2 self.owner = player",
"the key is the location of a tile and the content is the",
"Cards Attributes: ---- From Card Class ---- :location: (int) position, (0 - 39),",
"Each line in the file should be formatted as follows: Square# ClassType class",
"implement pay all other players print(\"Lucky for {} I don't know how to",
"gives it to free parking :param player: (Player Object) Player that landed on",
"elif class_type == \"PercentTax\": return {position: PercentTax(position, name, data[0])} elif class_type == \"FreeParking\":",
"moved back \"\"\" self.active_player.position -= num_tiles print(\"You've been sent back \", num_tiles, \"tiles.\",",
"1: return self.advance_to_tile(24) elif key == 2: return self.advance_to_tile(11) elif key == 3:",
"self.advance_to_next(Railroad) elif key == 5: return self.advance_to_next(Railroad) elif key == 6: return self.gain_money(50)",
"= False self.price = self.price * 2 self.owner = player self.owner.exchange_money(self.owner, self.price *",
"Board.spaces[self.active_player.position].landed_on() def go_to_jail(self): \"\"\" Sends the player to jail, player does not pass",
"False if the player would not like to buy \"\"\" buy_or_pass = get_yes_or_no_input(",
"elif key == 2: return self.advance_to_tile(11) elif key == 3: return self.advance_to_next(Utility) elif",
"Bank, \"\"\" self.is_mortgaged = True self.price = self.price / 2 Bank.exchange_money(self.owner, self.price) self.owner",
"== 13: return self.advance_to_tile(39) elif key == 14: return self.pay_all_other_players(50) elif key ==",
"self.number_of_houses = 0 self.cost_per_house = self.set_cost_per_house(location) super().__init__(location, name, int(property_data[1])) @staticmethod def set_cost_per_house(location): \"\"\"",
"key == 14: return self.house_repairs() elif key == 15: return self.gain_money(10) elif key",
"self.price = price self.owner = Bank self.is_mortgaged = False super().__init__() def landed_on(self, player):",
"Number Of Houses: {5}\" \\ \"\\n\\tRent Tiers {6}\"\\ .format(self.location, self.name, self.color, owned_by, self.cost_per_house,",
"New in SetTax Class ---- :amount: Amount to tax the player \"\"\" def",
"self.active_player.money += 200 print(\"You've advanced to the next \", str(class_type), \"\\n\\tTile Number: \",",
"mortgage state of the tile \"\"\" def __init__(self, location, name, price=200): \"\"\" :param",
"dictionary where the key is the location of a tile and the content",
"landed on tile \"\"\" self.owner.exchange_money(player, self.rent[self.number_of_houses]) def ask_buy_or_pass(self): \"\"\" Asks the player if",
"Rent \"\"\" return str(self.rent[self.number_of_houses]) class Utility(Location): \"\"\" Defines all utilities i.e. Electric Company",
"return self.gain_money(150) elif key == 16: return self.gain_money(100) else: return print(\"Bad Chance Card",
"== \"SetTax\": return {position: SetTax(position, name, data[0])} elif class_type == \"PercentTax\": return {position:",
"\"\\n\\t{3}\" \\ \"\\n\\tCost Per House: {4}, Number Of Houses: {5}\" \\ \"\\n\\tRent Tiers",
"output = output + \"\\n\" + cls.spaces[key].__str__() return output # construct the default",
"corner tiles Excluding Free Parking. Attributes: :location: (int) position, (0 - 39), on",
"data[0])} elif class_type == \"FreeParking\": return {position: FreeParking(position)} elif class_type == \"Go\": return",
"cls.spaces.update(utilities) cls.spaces.update(chances) cls.spaces.update(community_chest) cls.spaces.update(free_parking) @classmethod def read_in_board(cls): \"\"\" read in a board from",
"Utility): num_utils_owned += 1 self.owner.exchange_money(player, roll * multiplier[num_utils_owned]) class Railroad(Location): \"\"\" Defines all",
"price): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly board",
"return output def format_current_rent(self): \"\"\" Formats Current rent for __str__ :return: (String) Current",
"self.number_of_houses, rent_tiers) return output def format_current_rent(self): \"\"\" Formats Current rent for __str__ :return:",
"self.advance_to_tile(0) elif key == 1: return self.gain_money(200) elif key == 2: return self.lose_money(50)",
"ClassType class data \"\"\" loop_value = True while loop_value: try: if get_yes_or_no_input('Would You",
"> 30: return 200 elif location > 20: return 150 elif location >",
"of jail free card \"\"\" print(\"You got a get out of jail free",
"tile \"\"\" if self.owner == Bank: self.owned_by_bank(player) elif self.owner != player: self.owned_by_player(player) def",
"\"\"\" Takes amount from player and adds it to Free Parking :param player:",
"key == 12: return self.lose_money(150) elif key == 13: return self.gain_money(25) elif key",
"= int(data[0]) class_type = data[1] name = data[2] try: data = [int(x) for",
"for all locations on the board Attributes: :location: (int) position, (0 - 39),",
"money :param amount: (int) Amount of money to give active player \"\"\" print(\"You've",
"\"\"\" Board.spaces[20].exchange_money(player, player.money * self.percent) def __str__(self): \"\"\" :return: (String) Easy to read",
"method. :param num_tiles: (int) Number of tiles to be moved back \"\"\" self.active_player.position",
"# TODO: find a way to print out what players are in jail",
"self.active_player.money += 200 self.active_player.position = tile_num print(\"You've been moved to :\", Board.spaces[tile_num], \"\\n\\tTile",
"import TilesClassNotFoundError class Location(ABC): \"\"\" Abstract Parent Class for all locations on the",
"self.location = location self.name = name self.price = price self.owner = Bank self.is_mortgaged",
"the property, displays the Name and price :return: (Boolean) True if the player",
"of money to give active player \"\"\" print(\"You've gained $\", amount) self.active_player.money +=",
"property :param player: (Player Object) The player that landed on the tile \"\"\"",
"Attributes: ---- From CornerTile Class ---- :location: (int) position, (0 - 39), on",
"= Bank def unmortgage(self, player): \"\"\" Sets is_mortgaged to False, Sets price to",
"that landed on Percent Tax \"\"\" Board.spaces[20].exchange_money(player, player.money * self.percent) def __str__(self): \"\"\"",
"the property :number_of_houses: (int) Number of houses on the property, 0 - 5",
"Zero is No houses Five is a hotel :cost_per_house: (int) Price of one",
"in PercentTax Class ---- :percent: percent to tax the player \"\"\" def __init__(self,",
"method :param tile_num: (int) Tile the active player will be moved to \"\"\"",
"that the card will be affecting \"\"\" def __init__(self, location, name): \"\"\" :param",
"to purchase the property, displays the Name and price :return: (Boolean) True if",
"\"{0} {1} {2}\" \\ \"\\n\\t{3}\" \\ \"\\n\\tCost Per House: {4}, Number Of Houses:",
"ask_buy_or_pass(self): \"\"\" Asks the player if they would like to purchase the property,",
":is_mortgaged: (Boolean) mortgage state of the tile \"\"\" def __init__(self, location, name, price=150):",
"\"\"\" super().__init__(location, name) def landed_on(self, player): print(\"Landed on Go!\") class JustVisiting(CornerTile): \"\"\" Models",
"a get out of jail free card \"\"\" print(\"You got a get out",
"position = int(data[0]) class_type = data[1] name = data[2] try: data = [int(x)",
"from active player \"\"\" print(\"You've lost $\", amount) Board.spaces[20].exchange_money(self.active_player, amount) def get_out_of_jail_free(self): \"\"\"",
"Go!\") class JustVisiting(CornerTile): \"\"\" Models Just Visiting (jail) tile Attributes: ---- From CornerTile",
":return: (String) Easy to read owner information \"\"\" if isinstance(self.owner, Bank): owned_by =",
"elif key == 16: return self.gain_money(100) else: print(\"bad CC draw\") class Board(object): \"\"\"",
"if the tile is purchased, transfers money, updates owner, and sets is_mortgaged to",
"player.owned_properites.update({self.location: self}) self.is_mortgaged = False self.price = self.price * 2 def owned_by_player(self, player):",
"railroads Attributes: ---- From Location Class ---- :location: (int) position, (0 - 39),",
"else: return print(\"Bad Chance Card Draw\") class CommunityChest(Card): \"\"\" All Community Chest Cards",
"raise TilesClassNotFoundError except TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass Type: \", data[1], \" Not Found!\") break except",
"Community Chest Cards Attributes: ---- From Card Class ---- :location: (int) position, (0",
"if get_yes_or_no_input('Would You Like To Use The Standard Board?'): file_name = 'StandardBoard' else:",
"Tile :param amount: (int) amount to tax the player \"\"\" self.amount = int(amount)",
"board :name: (String) Name of the location \"\"\" def __init__(self, location=0, name='GO'): \"\"\"",
"the board, int from 0 to 39 :param property_data: (1x9 array-like) list with",
"4 railroads Attributes: ---- From Location Class ---- :location: (int) position, (0 -",
"= 'jail' def house_repairs(self): \"\"\" Charges player house repairs \"\"\" owed_money = 0",
"Board Attributes: :spaces: (Dict) A dictionary where the key is the location of",
"property \"\"\" spaces = {} @classmethod def default_board(cls): \"\"\" Builds a default board",
"\"\"\" self.location = location self.name = name @abstractmethod def landed_on(self, player): pass def",
"with open('boards/' + file_name) as file: for line in file: if not line.startswith('#'):",
"money :param amount: (int) amount of money to take from active player \"\"\"",
"{1}, Morgaged: {2}\" \\ .format(self.owner, self.price, self.is_mortgaged) return owned_by def __str__(self): \"\"\" :return:",
"= {x: Property(x, \"Name\", [\"Color\", 150, 5, 10, 20, 40, 80, 160]) for",
"on tile \"\"\" num_utils_owned = 0 multiplier = {1: 4, 2: 10} roll",
"\"\"\" return str(self.rent[self.number_of_houses]) class Utility(Location): \"\"\" Defines all utilities i.e. Electric Company and",
"TilesClassNotFoundError class Location(ABC): \"\"\" Abstract Parent Class for all locations on the board",
"output = \"{0} {1}\" \\ \"\\n\\tTax Amount: ${2}\"\\ .format(self.location, self.name, self.amount) return output",
"self.lose_money(50) elif key == 3: return self.gain_money(50) elif key == 4: return self.get_out_of_jail_free()",
"Does not include railroads or utilities Attributes: ---- From Location Class ---- :location:",
"(String) Name of the Tile :param price: (Optional, int, default=200) purchase cost of",
"monopoly board :name: (String) Name of the location \"\"\" @abstractmethod def __init__(self, location,",
"5: return self.go_to_jail() elif key == 6: return self.get_money_from_all_other_players(50) elif key == 7:",
"from Exceptions import TilesClassNotFoundError class Location(ABC): \"\"\" Abstract Parent Class for all locations",
"a tile based on the data provided :param data: Data read in from",
"@abstractmethod def __init__(self, location, name): \"\"\" :param location: (int) Location, (0 - 39)",
"self.house_repairs() elif key == 11: return self.lose_money(15) elif key == 12: return self.advance_to_tile(5)",
"landed on card tile :return: calls draw_card \"\"\" self.active_player = player return self.draw_card()",
"== 5: return self.go_to_jail() elif key == 6: return self.get_money_from_all_other_players(50) elif key ==",
"players :param amount: (int) amount gotten from other players \"\"\" amount = amount",
"\"PercentTax\": return {position: PercentTax(position, name, data[0])} elif class_type == \"FreeParking\": return {position: FreeParking(position)}",
"chances = {x: Chance(x, \"Chance Card\") for x in [7, 22, 36]} community_chest",
"= \"{0} {1}\" \\ \"\\n\\tTax percent: {2}%\"\\ .format(self.location, self.name, self.percent) class CornerTile(ABC): \"\"\"",
"self.price, self.is_mortgaged) return owned_by def __str__(self): \"\"\" :return: (String) Easy to read tile",
"== 14: return self.house_repairs() elif key == 15: return self.gain_money(10) elif key ==",
"default=JustVisiting) Name of the Tile \"\"\" super().__init__(location, name) def landed_on(self, player): # TODO:",
"other players specified amount :param amount: (int) amount to pay other players \"\"\"",
"super().__init__(location, name) def draw_card(self): \"\"\" Chooses a random random card and calls the",
"{20: FreeParking()} cls.spaces.update(streets) cls.spaces.update(railroads) cls.spaces.update(utilities) cls.spaces.update(chances) cls.spaces.update(community_chest) cls.spaces.update(free_parking) @classmethod def read_in_board(cls): \"\"\" read",
"if Board.spaces[key].owner == self.active_player: hold = Board.spaces[key].number_of_houses owed_money += 25 * hold except",
"(Player Object) Player that landed on card tile :return: calls draw_card \"\"\" self.active_player",
"or String) percent to tax the player \"\"\" self.percent = float(percent) super().__init__(location, name)",
":param name: (String) Name of the Tile \"\"\" self.location = location self.name =",
"True self.active_player.position = location_to_check if passed_go: self.active_player.money += 200 print(\"You've advanced to the",
"40 passed_go = True self.active_player.position = location_to_check if passed_go: self.active_player.money += 200 print(\"You've",
"back specified number of spaces and calls that tiles landed_on method. :param num_tiles:",
"to jail, player does not pass go and does not collect $200 \"\"\"",
"used with read_in_board in Board \"\"\" @staticmethod def create_tile(data): \"\"\" Creates a tile",
"36]} community_chest = {x: CommunityChest(x, \"Community Chest Card\") for x in [2, 17,",
"draw_card(self): \"\"\" Chooses a random random card and calls the appropriate method \"\"\"",
"return self.gain_money(25) elif key == 14: return self.house_repairs() elif key == 15: return",
"True self.price = self.price / 2 Bank.exchange_money(self.owner, self.price) self.owner = Bank def unmortgage(self,",
"SetTax Class ---- :amount: Amount to tax the player \"\"\" def __init__(self, location,",
"file_name == \"Q\": quit() print(\"File Not found, please try again.\\n\\tOr Enter Q to",
"again.\\n\\tOr Enter Q to quit\\n\") @classmethod def __str__(cls): \"\"\" :return: (String) Formatted __str__",
"GoToJail(CornerTile): \"\"\" Class that sends people to jail \"\"\" def __init__(self, location=30, name='Go",
"Formats current owner information for __str__() :return: (String) Easy to read owner information",
"location on the board :return: (int) cost for one house \"\"\" if location",
"(Boolean) True if the player would like to buy False if the player",
"location_to_check > 39: location_to_check = location_to_check % 40 passed_go = True self.active_player.position =",
"Location, (0 - 39) on the monopoly board :param name: (Optional, String, default=JustVisiting)",
"Name of the location \"\"\" def __init__(self, location=0, name='GO'): \"\"\" :param location: (int)",
"None super().__init__(location, name) def landed_on(self, player): \"\"\" Sets Active player to player, then",
"who landed on the property and who owns the property :param player: (Player",
"etc. Attributes: :location: (int) position, (0 - 39), on the monopoly board :name:",
"8: return self.gain_money(20) elif key == 9: return self.get_money_from_all_other_players(10) elif key == 10:",
"on the location :param location: (int) location on the board :return: (int) cost",
"a way to print out what players are in jail print(\"Just Visiting Jail\")",
"self.name, self.amount) return output class PercentTax(Effect): \"\"\" Charges player a set tax amount,",
"+ last_part_data if class_type == \"Property\": return {position: Property(position, name, data)} elif class_type",
"def landed_on(self, player): pass class Card(Effect): \"\"\" Parent Class for Chance and Community",
"and price :return: (Boolean) True if the player would like to buy False",
"Current Owner of the tile :is_mortgaged: (Boolean) mortgage state of the tile \"\"\"",
"15: return self.gain_money(10) elif key == 16: return self.gain_money(100) else: print(\"bad CC draw\")",
"name = data[2] try: data = [int(x) for x in data[3:]] except ValueError:",
"Rent {1}\" \\ .format(self.owner, self.format_current_rent()) else: owned_by = \"Owner: {0}, Price: {1}, Morgaged:",
"Works Attributes: ---- From Location Class ---- :location: (int) position, (0 - 39),",
"self.is_mortgaged) return owned_by def __str__(self): \"\"\" :return: (String) Easy to read tile description",
"@abstractmethod def landed_on(self, player): pass class Card(Effect): \"\"\" Parent Class for Chance and",
"Name of the location ---- New in SetTax Class ---- :amount: Amount to",
"tile \"\"\" num_utils_owned = 0 multiplier = {1: 4, 2: 10} roll =",
"\\ .format(self.owner, self.format_current_rent()) else: owned_by = \"Owner: {0}, Price: {1}, Morgaged: {2}\" \\",
"\"\"\" Advances active player to the next tile of specified class type :param",
":price: (int) purchase cost of the tile :owner: (UserEntity Object) Current Owner of",
"== 16: return self.gain_money(100) else: print(\"bad CC draw\") class Board(object): \"\"\" The Monopoly",
"draw_card() :param player: (Player Object) Player that landed on card tile :return: calls",
"Jail\") class GoToJail(CornerTile): \"\"\" Class that sends people to jail \"\"\" def __init__(self,",
"elif class_type == \"FreeParking\": return {position: FreeParking(position)} elif class_type == \"Go\": return {0:",
"super().__init__(location, name) def landed_on(self, player): \"\"\" Sets Active player to player, then calls",
"landed on the tile \"\"\" buy_or_pass = self.ask_buy_or_pass() if buy_or_pass: # buy player.money",
"are in jail print(\"Just Visiting Jail\") class GoToJail(CornerTile): \"\"\" Class that sends people",
"= player return self.draw_card() def draw_card(self): pass # -------------Card effects -------------- def advance_to_tile(self,",
"players are in jail print(\"Just Visiting Jail\") class GoToJail(CornerTile): \"\"\" Class that sends",
"to free parking :param player: (Player Object) Player that landed on Percent Tax",
"\", data[1], \" Not Found!\") break except IndexError: data = None class SetTax(Effect):",
"board :param name: (String) Name of the Tile :param price: (Optional, int, default=200)",
"with read_in_board in Board \"\"\" @staticmethod def create_tile(data): \"\"\" Creates a tile based",
"def owned_by_player(self, player): \"\"\" Charges player rent, transfers rent between owner and player",
"tile_num print(\"You've been moved to :\", Board.spaces[tile_num], \"\\n\\tTile Number:\", tile_num) return Board.spaces[self.active_player.position].landed_on(self.active_player) def",
"pay all other players print(\"Lucky for {} I don't know how to make",
"From Location Class ---- :location: (int) position, (0 - 39), on the monopoly",
"Tile Attributes: ---- From CornerTile Class ---- :location: (int) position, (0 - 39),",
"parking :param player: (Player Object) Player that landed on Percent Tax \"\"\" Board.spaces[20].exchange_money(player,",
"= player self.owner.exchange_money(self.owner, self.price * -.75) def format_owner(self): \"\"\" Formats current owner information",
"3: 150, 4: 200} for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Railroad): num_railroads_owned +=",
"based on the data provided :param data: Data read in from a file",
"10, 20, 40, 80, 160]) for x in range(0, 40)} railroads = {x:",
"on the player's wealth Attributes: ---- From Effect Class ---- :location: (int) position,",
"the next tile of specified class type :param class_type: (Object) class of tile",
"from player and adds it to Free Parking :param player: (Player Object) Player",
"def __init__(self, location, name, property_data): \"\"\" :param location: (Int) position on the board,",
"\", self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards += 1 def go_back(self, num_tiles): \"\"\" Moves player back specified",
"\"\"\" :return: (String) Description of the tile \"\"\" output = \"{0} {1}\".format(self.location, self.name)",
"(int) amount to tax the player \"\"\" self.amount = int(amount) super().__init__(location, name) def",
":param player: (Player Object) Player that landed on tile \"\"\" num_railroads_owned = 0",
"name)} elif class_type == \"Chance\": return {position: Chance(position, name)} elif class_type == \"CommunityChest\":",
"Player that landed on card tile :return: calls draw_card \"\"\" self.active_player = player",
"\", self.active_player.position) return Board.spaces[self.active_player.position].landed_on() def go_to_jail(self): \"\"\" Sends the player to jail, player",
"added to the board \"\"\" while True: try: if data is not None:",
"from random import randint from Exceptions import TilesClassNotFoundError class Location(ABC): \"\"\" Abstract Parent",
"tile based on the data provided :param data: Data read in from a",
"name: (String) Name of the Tile \"\"\" self.active_player = None super().__init__(location, name) def",
"\"\"\" Chooses a random random card and calls the appropriate method \"\"\" key",
"= True self.price = self.price / 2 Bank.exchange_money(self.owner, self.price) self.owner = Bank def",
"all other players :param amount: (int) amount gotten from other players \"\"\" amount",
"100 else: return 50 def __str__(self): \"\"\" :return: (String) Easy to read tile",
"of the corner tiles Excluding Free Parking. Attributes: :location: (int) position, (0 -",
"yet\".format(self.active_player)) def get_money_from_all_other_players(self, amount): \"\"\" Active player gets money from all other players",
"players specified amount :param amount: (int) amount to pay other players \"\"\" #",
"num_tiles): \"\"\" Moves player back specified number of spaces and calls that tiles",
"and who owns the property :param player: (Player Object) The player that landed",
"self.gain_money(10) elif key == 16: return self.gain_money(100) else: print(\"bad CC draw\") class Board(object):",
"class_type == \"PercentTax\": return {position: PercentTax(position, name, data[0])} elif class_type == \"FreeParking\": return",
"of the tile ---- New in Property Class ---- :color: (String) Color of",
"now have \", self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards += 1 def go_back(self, num_tiles): \"\"\" Moves player",
"name: (Optional, String, default=GO) Name of the Tile \"\"\" super().__init__(location, name) def landed_on(self,",
"key == 1: return self.advance_to_tile(24) elif key == 2: return self.advance_to_tile(11) elif key",
"(Player Object) Player that landed on tile \"\"\" Board.spaces[20].exchange_money(player, self.amount) def __str__(self): \"\"\"",
"# Checks if player will pass go if self.active_player.position >= tile_num: self.active_player.money +=",
"\"\"\" The Monopoly Board Attributes: :spaces: (Dict) A dictionary where the key is",
"150, 5, 10, 20, 40, 80, 160]) for x in range(0, 40)} railroads",
"the player to jail, player does not pass go and does not collect",
"elif key == 15: return self.gain_money(150) elif key == 16: return self.gain_money(100) else:",
"== 7: return self.gain_money(100) elif key == 8: return self.gain_money(20) elif key ==",
"Player that landed on tile \"\"\" Board.spaces[20].exchange_money(player, self.amount) def __str__(self): \"\"\" :return: (String)",
"for the property :number_of_houses: (int) Number of houses on the property, 0 -",
"data = None class SetTax(Effect): \"\"\" Charges player a set tax amount, is",
"Player unmortgage price :param player: (Player Object) Player that is unmortgageing the tile",
"2: 10} roll = randint(1, 6) for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Utility):",
"'' for key in cls.spaces: output = output + \"\\n\" + cls.spaces[key].__str__() return",
"the property \"\"\" spaces = {} @classmethod def default_board(cls): \"\"\" Builds a default",
":param property_data: (1x9 array-like) list with various data formatted as follows [\"Color\", Price,",
"== \"FreeParking\": return {position: FreeParking(position)} elif class_type == \"Go\": return {0: Go()} elif",
"\"\"\" self.amount = int(amount) super().__init__(location, name) def landed_on(self, player): \"\"\" Takes amount from",
"num_utils_owned = 0 multiplier = {1: 4, 2: 10} roll = randint(1, 6)",
":param name: (Optional, String, default=GO) Name of the Tile \"\"\" super().__init__(location, name) def",
"the Tile :param price: (int) purchase cost of the tile \"\"\" self.location =",
"Name and price :return: (Boolean) True if the player would like to buy",
":param tile_num: (int) Tile the active player will be moved to \"\"\" #",
"def go_to_jail(self): \"\"\" Sends the player to jail, player does not pass go",
"A tile to be added to the board \"\"\" while True: try: if",
"str(class_type), \"\\n\\tTile Number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on(self.active_player) def gain_money(self, amount): \"\"\" Give player",
"return self.advance_to_next(Utility) elif key == 4: return self.advance_to_next(Railroad) elif key == 5: return",
"wealth Attributes: ---- From Effect Class ---- :location: (int) position, (0 - 39),",
"return self.draw_card() def draw_card(self): pass # -------------Card effects -------------- def advance_to_tile(self, tile_num): \"\"\"",
"tile_num: (int) Tile the active player will be moved to \"\"\" # Checks",
"for one house \"\"\" if location > 30: return 200 elif location >",
"return self.gain_money(200) elif key == 2: return self.lose_money(50) elif key == 3: return",
"monopoly board :param name: (String) Name of the Tile \"\"\" self.active_player = None",
"__init__(self, location=0, name='GO'): \"\"\" :param location: (int) Location, (0 - 39) on the",
"== self.active_player: hold = Board.spaces[key].number_of_houses owed_money += 25 * hold except AttributeError: #",
"draw_card \"\"\" self.active_player = player return self.draw_card() def draw_card(self): pass # -------------Card effects",
"all 4 railroads Attributes: ---- From Location Class ---- :location: (int) position, (0",
"that landed on tile \"\"\" num_railroads_owned = 0 cost = {1: 50, 2:",
"and adds it to Free Parking :param player: (Player Object) Player that landed",
"\"\"\" :return: (String) Easy to read tile description \"\"\" output = \"{0} {1}\".format(self.location,",
"return self.get_out_of_jail_free() elif key == 5: return self.go_to_jail() elif key == 6: return",
"amount): \"\"\" Active player gets money from all other players :param amount: (int)",
"rent_tiers) return output def format_current_rent(self): \"\"\" Formats Current rent for __str__ :return: (String)",
"know how to make you pay everyone else... yet\".format(self.active_player)) def get_money_from_all_other_players(self, amount): \"\"\"",
"the option to purchase the tile, if the tile is purchased, transfers money,",
"name) def landed_on(self, player): player.position = 'jail' print(\"Go To Jail!!!\") class Jail(CornerTile): def",
"The Standard Board?'): file_name = 'StandardBoard' else: file_name = input(\"Please enter the file",
"the tile \"\"\" buy_or_pass = self.ask_buy_or_pass() if buy_or_pass: # buy player.money = player.money",
"\"\"\" # TODO: implement pay all other players print(\"Lucky for {} I don't",
"class_type == 'JustVisiting': return {10: JustVisiting()} elif class_type == 'GoToJail': return {30: GoToJail()}",
"= location self.name = name @abstractmethod def landed_on(self, player): pass class Card(Effect): \"\"\"",
"= line.split() new_tile = TileFactory.create_tile(data) cls.spaces.update(new_tile) loop_value = False except FileNotFoundError: if file_name",
"house_repairs(self): \"\"\" Charges player house repairs \"\"\" owed_money = 0 for key in",
"data = [int(x) for x in data[3:]] except ValueError: last_part_data = [int(x) for",
"moved to :\", Board.spaces[tile_num], \"\\n\\tTile Number:\", tile_num) return Board.spaces[self.active_player.position].landed_on(self.active_player) def advance_to_next(self, class_type): \"\"\"",
"to Free Parking :param player: (Player Object) Player that landed on tile \"\"\"",
"class_type == \"Railroad\": return {position: Railroad(position, name)} elif class_type == \"Chance\": return {position:",
"False while not isinstance( Board.spaces[location_to_check], class_type): location_to_check += 1 if location_to_check > 39:",
"all possible different tiles, used with read_in_board in Board \"\"\" @staticmethod def create_tile(data):",
"Board.spaces[20].exchange_money(self.active_player, amount) def get_out_of_jail_free(self): \"\"\" Gives player a get out of jail free",
"is unmortgageing the tile \"\"\" self.is_mortgaged = False self.price = self.price * 2",
"__init__(self, location=10, name=\"JustVisiting\"): \"\"\" :param location: (int) Location, (0 - 39) on the",
"player self.owner.exchange_money(self.owner, self.price * -.75) def format_owner(self): \"\"\" Formats current owner information for",
"\"\"\" Builds a default board for testing \"\"\" cls.spaces = {} streets =",
"in data[4:]] data = [data[3], ] + last_part_data if class_type == \"Property\": return",
"\"\\n\\tTile Number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on(self.active_player) def gain_money(self, amount): \"\"\" Give player money",
"[7, 22, 36]} community_chest = {x: CommunityChest(x, \"Community Chest Card\") for x in",
"provided :param data: Data read in from a file :return: A tile to",
"\"\"\" def __init__(self, location=30, name='Go To Jail'): super().__init__(location, name) def landed_on(self, player): player.position",
"[int(x) for x in data[3:]] except ValueError: last_part_data = [int(x) for x in",
"railroads or utilities Attributes: ---- From Location Class ---- :location: (int) position, (0",
"Creates a tile based on the data provided :param data: Data read in",
"to read tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\t{2}\".format(self.location, self.name, self.format_owner())",
"to print out what players are in jail print(\"Just Visiting Jail\") class GoToJail(CornerTile):",
"specified amount :param amount: (int) amount to pay other players \"\"\" # TODO:",
"that is unmortgageing the tile \"\"\" self.is_mortgaged = False self.price = self.price *",
"== 13: return self.gain_money(25) elif key == 14: return self.house_repairs() elif key ==",
"self.name = name @abstractmethod def landed_on(self, player): pass def __str__(self): \"\"\" :return: (String)",
"Board(object): \"\"\" The Monopoly Board Attributes: :spaces: (Dict) A dictionary where the key",
"option to purchase the tile, if the tile is purchased, transfers money, updates",
"Houses: {5}\" \\ \"\\n\\tRent Tiers {6}\"\\ .format(self.location, self.name, self.color, owned_by, self.cost_per_house, self.number_of_houses, rent_tiers)",
"key == 2: return self.lose_money(50) elif key == 3: return self.gain_money(50) elif key",
"return {position: SetTax(position, name, data[0])} elif class_type == \"PercentTax\": return {position: PercentTax(position, name,",
"on tile \"\"\" Board.spaces[20].exchange_money(player, self.amount) def __str__(self): \"\"\" :return: (String) Easy to read",
"\"\\n\" + cls.spaces[key].__str__() return output # construct the default board for testing Board.default_board()",
"been sent to jail!!\") self.active_player.position = 'jail' def house_repairs(self): \"\"\" Charges player house",
"try again.\\n\\tOr Enter Q to quit\\n\") @classmethod def __str__(cls): \"\"\" :return: (String) Formatted",
"on the monopoly board :param name: (String) Name of the Tile :param amount:",
"the tile :is_mortgaged: (Boolean) mortgage state of the tile \"\"\" def __init__(self, location,",
"(Player Object) Player that landed on tile \"\"\" num_utils_owned = 0 multiplier =",
"self.cost_per_house, self.number_of_houses, rent_tiers) return output def format_current_rent(self): \"\"\" Formats Current rent for __str__",
"class_type = data[1] name = data[2] try: data = [int(x) for x in",
"print(\"Lucky for you, you have no houses\") else: print(\"You paid: $\", owed_money) Board.spaces[20].exchange_money(self.active_player,",
"35]} utilities = {x: Utility(x, \"Name\") for x in [12, 28]} chances =",
"return self.go_back(3) elif key == 9: return self.go_to_jail() elif key == 10: return",
"location=30, name='Go To Jail'): super().__init__(location, name) def landed_on(self, player): player.position = 'jail' print(\"Go",
"key == 5: return self.go_to_jail() elif key == 6: return self.get_money_from_all_other_players(50) elif key",
":param amount: (int) Amount of money to give active player \"\"\" print(\"You've gained",
"the monopoly board :param name: (String) Name of the Tile :param price: (Optional,",
"like to buy \" + self.name + \" for $\" + str(self.price) +",
"key == 15: return self.gain_money(10) elif key == 16: return self.gain_money(100) else: print(\"bad",
"wealth and gives it to free parking :param player: (Player Object) Player that",
"(Optional, int, default=200) purchase cost of the tile \"\"\" super().__init__(location, name, price) def",
"__str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\" output = \"{0}",
"Tax \"\"\" Board.spaces[20].exchange_money(player, player.money * self.percent) def __str__(self): \"\"\" :return: (String) Easy to",
"(Object) class of tile to advance to examples: Railroad, Utility, Card \"\"\" location_to_check",
"player \"\"\" def __init__(self, location, name, percent): \"\"\" :param location: (int) Location, (0",
"33]} free_parking = {20: FreeParking()} cls.spaces.update(streets) cls.spaces.update(railroads) cls.spaces.update(utilities) cls.spaces.update(chances) cls.spaces.update(community_chest) cls.spaces.update(free_parking) @classmethod def",
"39) on the monopoly board :param name: (Optional, String, default=JustVisiting) Name of the",
"the tile \"\"\" def __init__(self, location, name, price=150): \"\"\" :param location: (int) Location,",
"location > 20: return 150 elif location > 10: return 100 else: return",
"def go_back(self, num_tiles): \"\"\" Moves player back specified number of spaces and calls",
"= False self.price = self.price * 2 def owned_by_player(self, player): \"\"\" Charges player",
"key in cls.spaces: output = output + \"\\n\" + cls.spaces[key].__str__() return output #",
"\"\"\" def __init__(self, location, name, price=200): \"\"\" :param location: (int) Location, (0 -",
"SetTax(Effect): \"\"\" Charges player a set tax amount, is not dependant on the",
"\"Community Chest Card\") for x in [2, 17, 33]} free_parking = {20: FreeParking()}",
"tiles Excluding Free Parking. Attributes: :location: (int) position, (0 - 39), on the",
"pay everyone else... yet\".format(self.active_player)) def get_money_from_all_other_players(self, amount): \"\"\" Active player gets money from",
"the card will be affecting \"\"\" def __init__(self, location, name): \"\"\" :param location:",
"== 14: return self.pay_all_other_players(50) elif key == 15: return self.gain_money(150) elif key ==",
"location :param location: (int) location on the board :return: (int) cost for one",
"card tile :return: calls draw_card \"\"\" self.active_player = player return self.draw_card() def draw_card(self):",
"price to full price Sets owner to player Charges Player unmortgage price :param",
"the monopoly board :name: (String) Name of the location :active_player: (Player Object) Player",
"print(\"Lucky for {} I don't know how to make you pay everyone else...",
"14: return self.pay_all_other_players(50) elif key == 15: return self.gain_money(150) elif key == 16:",
"cls.spaces.update(chances) cls.spaces.update(community_chest) cls.spaces.update(free_parking) @classmethod def read_in_board(cls): \"\"\" read in a board from file.",
"the tile \"\"\" output = \"{0} {1}\".format(self.location, self.name) return output class Go(CornerTile): \"\"\"",
"= player.money - self.price self.owner = player player.owned_properites.update({self.location: self}) self.is_mortgaged = False self.price",
"location, name, percent): \"\"\" :param location: (int) Location, (0 - 39) on the",
"AttributeError: # Corner Tiles have no attribute owner, skipped pass print(\"House repairs are",
"0 multiplier = {1: 4, 2: 10} roll = randint(1, 6) for key",
"All Chance Cards Attributes: ---- From Card Class ---- :location: (int) position, (0",
"is_mortgaged to False :param player: (Player Object) Player that landed on the tile",
"+= 25 * hold except AttributeError: # Corner Tiles have no attribute owner,",
"self.house_repairs() elif key == 15: return self.gain_money(10) elif key == 16: return self.gain_money(100)",
"the price for one house based on the location :param location: (int) location",
"= self.price * 2 def owned_by_player(self, player): \"\"\" Charges player rent, transfers rent",
"print(\"You've been sent back \", num_tiles, \"tiles.\", \"\\nYou're now on tile number: \",",
"Name of the Tile :param price: (int) purchase cost of the tile \"\"\"",
"7: self.get_out_of_jail_free() elif key == 8: return self.go_back(3) elif key == 9: return",
"== 6: return self.get_money_from_all_other_players(50) elif key == 7: return self.gain_money(100) elif key ==",
"lost $\", amount) Board.spaces[20].exchange_money(self.active_player, amount) def get_out_of_jail_free(self): \"\"\" Gives player a get out",
"def __init__(self, location, name, price=200): \"\"\" :param location: (int) Location, (0 - 39)",
"make you pay everyone else... yet\".format(self.active_player)) def get_money_from_all_other_players(self, amount): \"\"\" Active player gets",
"all objects in spaces \"\"\" output = '' for key in cls.spaces: output",
"state of the tile \"\"\" def __init__(self, location, name, price=200): \"\"\" :param location:",
"Tiles have no attribute owner, skipped pass print(\"House repairs are expensive!\") if owed_money",
"buy player.money = player.money - self.price self.owner = player player.owned_properites.update({self.location: self}) self.is_mortgaged =",
"buy_or_pass = get_yes_or_no_input( \"Would you like to buy \" + self.name + \"",
"house based on the location :param location: (int) location on the board :return:",
"landed on tile \"\"\" num_railroads_owned = 0 cost = {1: 50, 2: 100,",
"= '' for tier in self.rent: rent_tiers += str(tier) + ', ' owned_by",
"monopoly board :param name: (String) Name of the Tile :param price: (Optional, int,",
"data[1] name = data[2] try: data = [int(x) for x in data[3:]] except",
"self}) self.is_mortgaged = False self.price = self.price * 2 def owned_by_player(self, player): \"\"\"",
"specified tile and calls that tile's landed_on method :param tile_num: (int) Tile the",
"== 1: return self.gain_money(200) elif key == 2: return self.lose_money(50) elif key ==",
":active_player: (Player Object) Player that the card will be affecting \"\"\" def __init__(self,",
"None class SetTax(Effect): \"\"\" Charges player a set tax amount, is not dependant",
"landed on the property and who owns the property :param player: (Player Object)",
"pays all other players specified amount :param amount: (int) amount to pay other",
"between owner and player :param player: (Player Object) Player that landed on tile",
"in data[3:]] except ValueError: last_part_data = [int(x) for x in data[4:]] data =",
"key == 10: return self.house_repairs() elif key == 11: return self.lose_money(15) elif key",
"return self.advance_to_tile(0) elif key == 1: return self.gain_money(200) elif key == 2: return",
"construct the default board for testing Board.default_board() class TileFactory: \"\"\" Creates all possible",
"amount): \"\"\" Active player pays all other players specified amount :param amount: (int)",
"a set tax amount, is not dependant on the player's wealth Attributes: ----",
"__init__(self, location, name, price=200): \"\"\" :param location: (int) Location, (0 - 39) on",
":param player: (Player Object) Player that is unmortgageing the tile \"\"\" self.is_mortgaged =",
"1: return self.gain_money(200) elif key == 2: return self.lose_money(50) elif key == 3:",
"def __init__(self, location, name, percent): \"\"\" :param location: (int) Location, (0 - 39)",
"pass # -------------Card effects -------------- def advance_to_tile(self, tile_num): \"\"\" Moves player to specified",
"board :param name: (String) Name of the Tile :param amount: (int) amount to",
"elif class_type == \"CommunityChest\": return {position: CommunityChest(position, name)} elif class_type == \"SetTax\": return",
"current owner information for __str__() :return: (String) Easy to read owner information \"\"\"",
"0 to 39 :param property_data: (1x9 array-like) list with various data formatted as",
"Go(CornerTile): \"\"\" Models GO Tile Attributes: ---- From CornerTile Class ---- :location: (int)",
"= {1: 4, 2: 10} roll = randint(1, 6) for key in self.owner.owned_properites:",
"New in PercentTax Class ---- :percent: percent to tax the player \"\"\" def",
"Data read in from a file :return: A tile to be added to",
"in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Utility): num_utils_owned += 1 self.owner.exchange_money(player, roll * multiplier[num_utils_owned]) class",
"landed_on(self, player): # TODO: find a way to print out what players are",
"now on tile number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on() def go_to_jail(self): \"\"\" Sends the",
":param location: (int) Location, (0 - 39) on the monopoly board :param name:",
"num_railroads_owned += 1 self.owner.exchange_money(player, cost[num_railroads_owned]) class Effect(ABC): \"\"\" Parent class for all squares",
"Parking :param player: (Player Object) Player that landed on tile \"\"\" Board.spaces[20].exchange_money(player, self.amount)",
"print out what players are in jail print(\"Just Visiting Jail\") class GoToJail(CornerTile): \"\"\"",
"sends people to jail \"\"\" def __init__(self, location=30, name='Go To Jail'): super().__init__(location, name)",
"(String) Current Rent \"\"\" return str(self.rent[self.number_of_houses]) class Utility(Location): \"\"\" Defines all utilities i.e.",
"self.format_current_rent()) else: owned_by = \"Owner: {0}, Price: {1}, Morgaged: {2}\" \\ .format(self.owner, self.price,",
"to read tile description \"\"\" output = \"{0} {1}\".format(self.location, self.name) return output class",
"0: print(\"Lucky for you, you have no houses\") else: print(\"You paid: $\", owed_money)",
"player :param player: (Player Object) Player that landed on tile \"\"\" self.owner.exchange_money(player, self.rent[self.number_of_houses])",
"== 15: return self.gain_money(10) elif key == 16: return self.gain_money(100) else: print(\"bad CC",
"@classmethod def read_in_board(cls): \"\"\" read in a board from file. Each line in",
"been sent back \", num_tiles, \"tiles.\", \"\\nYou're now on tile number: \", self.active_player.position)",
"key == 3: return self.gain_money(50) elif key == 4: return self.get_out_of_jail_free() elif key",
"be moved back \"\"\" self.active_player.position -= num_tiles print(\"You've been sent back \", num_tiles,",
"rent, rent_1_house, ..., rent_hotel] \"\"\" self.color = property_data[0] self.rent = property_data[2:] self.number_of_houses =",
"{x: Chance(x, \"Chance Card\") for x in [7, 22, 36]} community_chest = {x:",
"last_part_data = [int(x) for x in data[4:]] data = [data[3], ] + last_part_data",
"\"\"\" :param location: (int) Location, (0 - 39) on the monopoly board :param",
"def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\" rent_tiers =",
"__init__(self, location, name): \"\"\" :param location: (int) Location, (0 - 39) on the",
"\"\"\" spaces = {} @classmethod def default_board(cls): \"\"\" Builds a default board for",
"the active player will be moved to \"\"\" # Checks if player will",
"\"\"\" print(\"Oh No! you've been sent to jail!!\") self.active_player.position = 'jail' def house_repairs(self):",
"return str(self.rent[self.number_of_houses]) class Utility(Location): \"\"\" Defines all utilities i.e. Electric Company and Water",
"class_type == \"Property\": return {position: Property(position, name, data)} elif class_type == \"Utility\": return",
"cls.spaces.update(free_parking) @classmethod def read_in_board(cls): \"\"\" read in a board from file. Each line",
"Price: {1}, Morgaged: {2}\" \\ .format(self.owner, self.price, self.is_mortgaged) return owned_by def __str__(self): \"\"\"",
"with various data formatted as follows [\"Color\", Price, rent, rent_1_house, ..., rent_hotel] \"\"\"",
"self.amount = int(amount) super().__init__(location, name) def landed_on(self, player): \"\"\" Takes amount from player",
"39), on the monopoly board :name: (String) Name of the location \"\"\" @abstractmethod",
"and calls that tiles landed_on method. :param num_tiles: (int) Number of tiles to",
"'StandardBoard' else: file_name = input(\"Please enter the file Name: \") with open('boards/' +",
"key == 6: return self.get_money_from_all_other_players(50) elif key == 7: return self.gain_money(100) elif key",
"description \"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax percent: {2}%\"\\ .format(self.location, self.name, self.percent)",
"---- New in Property Class ---- :color: (String) Color of the property :rent:",
"Chest Card\") for x in [2, 17, 33]} free_parking = {20: FreeParking()} cls.spaces.update(streets)",
"the location ---- New in PercentTax Class ---- :percent: percent to tax the",
"buy False if the player would not like to buy \"\"\" buy_or_pass =",
"open('boards/' + file_name) as file: for line in file: if not line.startswith('#'): data",
"other players :param amount: (int) amount gotten from other players \"\"\" amount =",
"calls draw_card \"\"\" self.active_player = player return self.draw_card() def draw_card(self): pass # -------------Card",
"the board Attributes: :location: (int) position, (0 - 39), on the monopoly board",
"__str__() :return: (String) Easy to read owner information \"\"\" if isinstance(self.owner, Bank): owned_by",
"\\ \"\\n\\tRent Tiers {6}\"\\ .format(self.location, self.name, self.color, owned_by, self.cost_per_house, self.number_of_houses, rent_tiers) return output",
"Models Just Visiting (jail) tile Attributes: ---- From CornerTile Class ---- :location: (int)",
"the tile :is_mortgaged: (Boolean) mortgage state of the tile \"\"\" @abstractmethod def __init__(self,",
"CornerTile(ABC): \"\"\" Parent Class For Each of the corner tiles Excluding Free Parking.",
"monopoly board :param name: (String) Name of the Tile :param price: (int) purchase",
"\"\"\" output = \"{0} {1}\" \\ \"\\n\\t{2}\".format(self.location, self.name, self.format_owner()) return output class Property(Location):",
"player.position = 'jail' print(\"Go To Jail!!!\") class Jail(CornerTile): def __init__(self, location='jail', name='jail'): super().__init__(location,",
"(Player Object) The player that landed on the tile \"\"\" if self.owner ==",
"the tile \"\"\" super().__init__(location, name, price) def owned_by_player(self, player): \"\"\" Charges player rent,",
"out what players are in jail print(\"Just Visiting Jail\") class GoToJail(CornerTile): \"\"\" Class",
"self.price * 2 self.owner = player self.owner.exchange_money(self.owner, self.price * -.75) def format_owner(self): \"\"\"",
"tile_num) return Board.spaces[self.active_player.position].landed_on(self.active_player) def advance_to_next(self, class_type): \"\"\" Advances active player to the next",
"FreeParking()} cls.spaces.update(streets) cls.spaces.update(railroads) cls.spaces.update(utilities) cls.spaces.update(chances) cls.spaces.update(community_chest) cls.spaces.update(free_parking) @classmethod def read_in_board(cls): \"\"\" read in",
"39), on the monopoly board :name: (String) Name of the location \"\"\" def",
"self.format_owner() output = \"{0} {1} {2}\" \\ \"\\n\\t{3}\" \\ \"\\n\\tCost Per House: {4},",
"Chest Cards Attributes: ---- From Effect Class ---- :location: (int) position, (0 -",
"tile \"\"\" def __init__(self, location, name, price=200): \"\"\" :param location: (int) Location, (0",
"self.active_player = None super().__init__(location, name) def landed_on(self, player): \"\"\" Sets Active player to",
"class PercentTax(Effect): \"\"\" Charges player a set tax amount, is not dependant on",
"To Jail'): super().__init__(location, name) def landed_on(self, player): player.position = 'jail' print(\"Go To Jail!!!\")",
"1 def go_back(self, num_tiles): \"\"\" Moves player back specified number of spaces and",
"tiles, used with read_in_board in Board \"\"\" @staticmethod def create_tile(data): \"\"\" Creates a",
"Railroad(position, name)} elif class_type == \"Chance\": return {position: Chance(position, name)} elif class_type ==",
"\\ \"\\n\\tCost Per House: {4}, Number Of Houses: {5}\" \\ \"\\n\\tRent Tiers {6}\"\\",
"number of spaces and calls that tiles landed_on method. :param num_tiles: (int) Number",
"\"\"\" buy_or_pass = self.ask_buy_or_pass() if buy_or_pass: # buy player.money = player.money - self.price",
"board :param name: (String) Name of the Tile \"\"\" self.active_player = None super().__init__(location,",
"purchased, transfers money, updates owner, and sets is_mortgaged to False :param player: (Player",
"name=\"JustVisiting\"): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly board",
"= \"Owner: {0}, Current Rent {1}\" \\ .format(self.owner, self.format_current_rent()) else: owned_by = \"Owner:",
"on the monopoly board :name: (String) Name of the location \"\"\" @abstractmethod def",
"{1}\" \\ \"\\n\\t{2}\".format(self.location, self.name, self.format_owner()) return output class Property(Location): \"\"\" Defines all the",
"\"\"\" @abstractmethod def __init__(self, location, name, price): \"\"\" :param location: (int) Location, (0",
"elif key == 7: return self.gain_money(100) elif key == 8: return self.gain_money(20) elif",
"class_type == \"FreeParking\": return {position: FreeParking(position)} elif class_type == \"Go\": return {0: Go()}",
"{4}, Number Of Houses: {5}\" \\ \"\\n\\tRent Tiers {6}\"\\ .format(self.location, self.name, self.color, owned_by,",
"50 def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\" rent_tiers",
"Chance Cards Attributes: ---- From Card Class ---- :location: (int) position, (0 -",
"super().__init__() def landed_on(self, player): \"\"\" Calls the proper function depending on who landed",
"line in file: if not line.startswith('#'): data = line.split() new_tile = TileFactory.create_tile(data) cls.spaces.update(new_tile)",
"landed on tile \"\"\" Board.spaces[20].exchange_money(player, self.amount) def __str__(self): \"\"\" :return: (String) Easy to",
"\"\"\" location_to_check = self.active_player.position + 1 passed_go = False while not isinstance( Board.spaces[location_to_check],",
"proper function depending on who landed on the property and who owns the",
"transfers rent between owner and player :param player: (Player Object) Player that landed",
"way to print out what players are in jail print(\"Just Visiting Jail\") class",
"6: return self.gain_money(50) elif key == 7: self.get_out_of_jail_free() elif key == 8: return",
"$200 \"\"\" print(\"Oh No! you've been sent to jail!!\") self.active_player.position = 'jail' def",
"Property(x, \"Name\", [\"Color\", 150, 5, 10, 20, 40, 80, 160]) for x in",
"num_utils_owned += 1 self.owner.exchange_money(player, roll * multiplier[num_utils_owned]) class Railroad(Location): \"\"\" Defines all 4",
"data formatted as follows [\"Color\", Price, rent, rent_1_house, ..., rent_hotel] \"\"\" self.color =",
"(Player Object) Player that landed on Percent Tax \"\"\" Board.spaces[20].exchange_money(player, player.money * self.percent)",
"Object) Player that landed on tile \"\"\" Board.spaces[20].exchange_money(player, self.amount) def __str__(self): \"\"\" :return:",
".format(self.owner, self.format_current_rent()) else: owned_by = \"Owner: {0}, Price: {1}, Morgaged: {2}\" \\ .format(self.owner,",
"elif key == 15: return self.gain_money(10) elif key == 16: return self.gain_money(100) else:",
"tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\t{2}\".format(self.location, self.name, self.format_owner()) return output",
"@abstractmethod def landed_on(self, player): pass def __str__(self): \"\"\" :return: (String) Description of the",
"== 3: return self.advance_to_next(Utility) elif key == 4: return self.advance_to_next(Railroad) elif key ==",
"board :return: (int) cost for one house \"\"\" if location > 30: return",
"= {1: 50, 2: 100, 3: 150, 4: 200} for key in self.owner.owned_properites:",
"\"\"\" @staticmethod def create_tile(data): \"\"\" Creates a tile based on the data provided",
"(int) position, (0 - 39), on the monopoly board :name: (String) Name of",
"tile \"\"\" output = \"{0} {1}\".format(self.location, self.name) return output class Go(CornerTile): \"\"\" Models",
"the proper function depending on who landed on the property and who owns",
"to take from active player \"\"\" print(\"You've lost $\", amount) Board.spaces[20].exchange_money(self.active_player, amount) def",
"from a file :return: A tile to be added to the board \"\"\"",
"the next \", str(class_type), \"\\n\\tTile Number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on(self.active_player) def gain_money(self, amount):",
"40)} railroads = {x: Railroad(x, \"Name\") for x in [5, 15, 25, 35]}",
"to False :param player: (Player Object) Player that landed on the tile \"\"\"",
"key == 4: return self.advance_to_next(Railroad) elif key == 5: return self.advance_to_next(Railroad) elif key",
"elif key == 11: return self.lose_money(50) elif key == 12: return self.lose_money(150) elif",
"player): \"\"\" Sets Active player to player, then calls draw_card() :param player: (Player",
"be moved to \"\"\" # Checks if player will pass go if self.active_player.position",
"percent): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly board",
"purchase the property, displays the Name and price :return: (Boolean) True if the",
"UserEntity import Player, Bank, FreeParking from InputValidation import get_yes_or_no_input from random import randint",
"elif class_type == \"SetTax\": return {position: SetTax(position, name, data[0])} elif class_type == \"PercentTax\":",
"else: file_name = input(\"Please enter the file Name: \") with open('boards/' + file_name)",
"to advance to examples: Railroad, Utility, Card \"\"\" location_to_check = self.active_player.position + 1",
"(String) Name of the location ---- New In Card Class ---- :active_player: (Player",
"TODO: implement pay all other players print(\"Lucky for {} I don't know how",
"player \"\"\" print(\"You've lost $\", amount) Board.spaces[20].exchange_money(self.active_player, amount) def get_out_of_jail_free(self): \"\"\" Gives player",
"the tile is purchased, transfers money, updates owner, and sets is_mortgaged to False",
"(String) Name of the Tile :param percent: (float or String) percent to tax",
"- 39), on the monopoly board :name: (String) Name of the location :active_player:",
"passed_go: self.active_player.money += 200 print(\"You've advanced to the next \", str(class_type), \"\\n\\tTile Number:",
":param data: Data read in from a file :return: A tile to be",
"= self.set_cost_per_house(location) super().__init__(location, name, int(property_data[1])) @staticmethod def set_cost_per_house(location): \"\"\" Determines the price for",
"state of the tile ---- New in Property Class ---- :color: (String) Color",
"location: (int) location on the board :return: (int) cost for one house \"\"\"",
"Board.spaces[key].number_of_houses owed_money += 25 * hold except AttributeError: # Corner Tiles have no",
"class TileFactory: \"\"\" Creates all possible different tiles, used with read_in_board in Board",
":param player: (Player Object) The player that landed on the tile \"\"\" if",
"the file Name: \") with open('boards/' + file_name) as file: for line in",
"pass class Card(Effect): \"\"\" Parent Class for Chance and Community Chest Cards Attributes:",
"False, Sets price to full price Sets owner to player Charges Player unmortgage",
"owed_money) Board.spaces[20].exchange_money(self.active_player, owed_money) def pay_all_other_players(self, amount): \"\"\" Active player pays all other players",
"the player if they would like to purchase the property, displays the Name",
"on the monopoly board :param name: (Optional, String, default=JustVisiting) Name of the Tile",
"def landed_on(self, player): \"\"\" Sets Active player to player, then calls draw_card() :param",
"(String) Name of the Tile \"\"\" super().__init__(location, name) def draw_card(self): \"\"\" Chooses a",
"amount, is not dependant on the player's wealth Attributes: ---- From Effect Class",
"a default board for testing \"\"\" cls.spaces = {} streets = {x: Property(x,",
"\"{0} {1}\" \\ \"\\n\\tTax Amount: ${2}\"\\ .format(self.location, self.name, self.amount) return output class PercentTax(Effect):",
"all locations on the board Attributes: :location: (int) position, (0 - 39), on",
"elif key == 9: return self.get_money_from_all_other_players(10) elif key == 10: return self.gain_money(100) elif",
"+ cls.spaces[key].__str__() return output # construct the default board for testing Board.default_board() class",
"\"\"\" :param location: (Int) position on the board, int from 0 to 39",
"name) def landed_on(self, player): \"\"\" Charges player percent of their total wealth and",
"of jail free card\", \"\\n\\t you now have \", self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards += 1",
"class_type == 'jail': return {'jail': Jail()} else: raise TilesClassNotFoundError except TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass Type:",
"= name @abstractmethod def landed_on(self, player): pass def __str__(self): \"\"\" :return: (String) Description",
"file. Each line in the file should be formatted as follows: Square# ClassType",
"Active player to player, then calls draw_card() :param player: (Player Object) Player that",
":return: calls draw_card \"\"\" self.active_player = player return self.draw_card() def draw_card(self): pass #",
"card\", \"\\n\\t you now have \", self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards += 1 def go_back(self, num_tiles):",
"for x in [2, 17, 33]} free_parking = {20: FreeParking()} cls.spaces.update(streets) cls.spaces.update(railroads) cls.spaces.update(utilities)",
"tier in self.rent: rent_tiers += str(tier) + ', ' owned_by = self.format_owner() output",
"to True, Gives owner mortgage value (1/2 price), Sets price to 1/2 price,",
"= player player.owned_properites.update({self.location: self}) self.is_mortgaged = False self.price = self.price * 2 def",
"the property and who owns the property :param player: (Player Object) The player",
"self.name, self.format_owner()) return output class Property(Location): \"\"\" Defines all the Properties on the",
"data[1], \" Not Found!\") break except IndexError: data = None class SetTax(Effect): \"\"\"",
"[\"Color\", Price, rent, rent_1_house, ..., rent_hotel] \"\"\" self.color = property_data[0] self.rent = property_data[2:]",
"the monopoly board :param name: (String) Name of the Tile :param amount: (int)",
"self.pay_all_other_players(amount) def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\" output",
"go_back(self, num_tiles): \"\"\" Moves player back specified number of spaces and calls that",
"= \"{0} {1}\".format(self.location, self.name) return output class Chance(Card): \"\"\" All Chance Cards Attributes:",
"tile to advance to examples: Railroad, Utility, Card \"\"\" location_to_check = self.active_player.position +",
"key == 13: return self.gain_money(25) elif key == 14: return self.house_repairs() elif key",
"from abc import ABC, abstractmethod from UserEntity import Player, Bank, FreeParking from InputValidation",
"the board Does not include railroads or utilities Attributes: ---- From Location Class",
"don't know how to make you pay everyone else... yet\".format(self.active_player)) def get_money_from_all_other_players(self, amount):",
"squares where an effect is applied. Including Chance, Community Chest, Income tax, etc.",
"Parking. Attributes: :location: (int) position, (0 - 39), on the monopoly board :name:",
"location self.name = name @abstractmethod def landed_on(self, player): pass def __str__(self): \"\"\" :return:",
"if data is not None: position = int(data[0]) class_type = data[1] name =",
"6) for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Utility): num_utils_owned += 1 self.owner.exchange_money(player, roll",
"Builds a default board for testing \"\"\" cls.spaces = {} streets = {x:",
"locations on the board Attributes: :location: (int) position, (0 - 39), on the",
"of the tile \"\"\" output = \"{0} {1}\".format(self.location, self.name) return output class Go(CornerTile):",
"that landed on tile \"\"\" num_utils_owned = 0 multiplier = {1: 4, 2:",
"tax the player \"\"\" self.percent = float(percent) super().__init__(location, name) def landed_on(self, player): \"\"\"",
"Morgaged: {2}\" \\ .format(self.owner, self.price, self.is_mortgaged) return owned_by def __str__(self): \"\"\" :return: (String)",
"{x: Utility(x, \"Name\") for x in [12, 28]} chances = {x: Chance(x, \"Chance",
"\"Utility\": return {position: Utility(position, name)} elif class_type == \"Railroad\": return {position: Railroad(position, name)}",
"and player :param player: (Player Object) Player that landed on tile \"\"\" self.owner.exchange_money(player,",
"\", str(class_type), \"\\n\\tTile Number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on(self.active_player) def gain_money(self, amount): \"\"\" Give",
"* self.percent) def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\"",
"Name of the Tile \"\"\" self.location = location self.name = name @abstractmethod def",
":name: (String) Name of the location \"\"\" def __init__(self, location=0, name='GO'): \"\"\" :param",
"moved to \"\"\" # Checks if player will pass go if self.active_player.position >=",
"on the monopoly board :name: (String) Name of the location \"\"\" def __init__(self,",
"pass def __str__(self): \"\"\" :return: (String) Description of the tile \"\"\" output =",
"Sets is_mortgaged to True, Gives owner mortgage value (1/2 price), Sets price to",
"- 39) on the monopoly board :param name: (Optional, String, default=GO) Name of",
"cls.spaces.update(railroads) cls.spaces.update(utilities) cls.spaces.update(chances) cls.spaces.update(community_chest) cls.spaces.update(free_parking) @classmethod def read_in_board(cls): \"\"\" read in a board",
"in file: if not line.startswith('#'): data = line.split() new_tile = TileFactory.create_tile(data) cls.spaces.update(new_tile) loop_value",
"tiers for the property :number_of_houses: (int) Number of houses on the property, 0",
"all squares where an effect is applied. Including Chance, Community Chest, Income tax,",
"tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax Amount: ${2}\"\\ .format(self.location, self.name,",
"(String) Name of the location \"\"\" def __init__(self, location=10, name=\"JustVisiting\"): \"\"\" :param location:",
"str(self.price) + \"? y/n\") return buy_or_pass def mortgage(self): \"\"\" Sets is_mortgaged to True,",
"elif key == 10: return self.house_repairs() elif key == 11: return self.lose_money(15) elif",
"Square# ClassType class data \"\"\" loop_value = True while loop_value: try: if get_yes_or_no_input('Would",
"content is the property \"\"\" spaces = {} @classmethod def default_board(cls): \"\"\" Builds",
"board :param name: (String) Name of the Tile :param percent: (float or String)",
"buy_or_pass: # buy player.money = player.money - self.price self.owner = player player.owned_properites.update({self.location: self})",
"location_to_check += 1 if location_to_check > 39: location_to_check = location_to_check % 40 passed_go",
"Chest, Income tax, etc. Attributes: :location: (int) position, (0 - 39), on the",
"\", self.active_player.position) return Board.spaces[self.active_player.position].landed_on(self.active_player) def gain_money(self, amount): \"\"\" Give player money :param amount:",
"in a board from file. Each line in the file should be formatted",
"possible different tiles, used with read_in_board in Board \"\"\" @staticmethod def create_tile(data): \"\"\"",
"Charges player percent of their total wealth and gives it to free parking",
"amount): \"\"\" Takes player's money :param amount: (int) amount of money to take",
"Jail(CornerTile): def __init__(self, location='jail', name='jail'): super().__init__(location, name) def landed_on(self, player): pass def __str__(self):",
"\"\"\" Determines the price for one house based on the location :param location:",
"== 0: return self.advance_to_tile(0) elif key == 1: return self.gain_money(200) elif key ==",
"self.gain_money(100) else: print(\"bad CC draw\") class Board(object): \"\"\" The Monopoly Board Attributes: :spaces:",
"landed_on(self, player): \"\"\" Sets Active player to player, then calls draw_card() :param player:",
"owner to Bank, \"\"\" self.is_mortgaged = True self.price = self.price / 2 Bank.exchange_money(self.owner,",
"Gives player a get out of jail free card \"\"\" print(\"You got a",
"a get out of jail free card\", \"\\n\\t you now have \", self.active_player.get_out_of_jail_cards)",
"25 * hold except AttributeError: # Corner Tiles have no attribute owner, skipped",
"Bank, FreeParking from InputValidation import get_yes_or_no_input from random import randint from Exceptions import",
"based on the location :param location: (int) location on the board :return: (int)",
"Chance(x, \"Chance Card\") for x in [7, 22, 36]} community_chest = {x: CommunityChest(x,",
"return {position: Property(position, name, data)} elif class_type == \"Utility\": return {position: Utility(position, name)}",
"\"Chance\": return {position: Chance(position, name)} elif class_type == \"CommunityChest\": return {position: CommunityChest(position, name)}",
"and calls the appropriate method \"\"\" key = randint(0, 16) if key ==",
"(Boolean) mortgage state of the tile \"\"\" @abstractmethod def __init__(self, location, name, price):",
"= '' for key in cls.spaces: output = output + \"\\n\" + cls.spaces[key].__str__()",
"return {position: FreeParking(position)} elif class_type == \"Go\": return {0: Go()} elif class_type ==",
"player rent, transfers rent between owner and player :param player: (Player Object) Player",
"= {} streets = {x: Property(x, \"Name\", [\"Color\", 150, 5, 10, 20, 40,",
"of the location ---- New In Card Class ---- :active_player: (Player Object) Player",
"@staticmethod def create_tile(data): \"\"\" Creates a tile based on the data provided :param",
"jail print(\"Just Visiting Jail\") class GoToJail(CornerTile): \"\"\" Class that sends people to jail",
":location: (int) position, (0 - 39), on the monopoly board :name: (String) Name",
"super().__init__(location, name) def landed_on(self, player): pass def __str__(self): return \"This is the jail\"",
"0 self.cost_per_house = self.set_cost_per_house(location) super().__init__(location, name, int(property_data[1])) @staticmethod def set_cost_per_house(location): \"\"\" Determines the",
"default board for testing \"\"\" cls.spaces = {} streets = {x: Property(x, \"Name\",",
"22, 36]} community_chest = {x: CommunityChest(x, \"Community Chest Card\") for x in [2,",
"roll * multiplier[num_utils_owned]) class Railroad(Location): \"\"\" Defines all 4 railroads Attributes: ---- From",
"= name self.price = price self.owner = Bank self.is_mortgaged = False super().__init__() def",
"key == 9: return self.go_to_jail() elif key == 10: return self.house_repairs() elif key",
"to player Charges Player unmortgage price :param player: (Player Object) Player that is",
"mortgage(self): \"\"\" Sets is_mortgaged to True, Gives owner mortgage value (1/2 price), Sets",
"owner, skipped pass print(\"House repairs are expensive!\") if owed_money == 0: print(\"Lucky for",
"\"\"\" output = \"{0} {1}\".format(self.location, self.name) return output class Chance(Card): \"\"\" All Chance",
"specified class type :param class_type: (Object) class of tile to advance to examples:",
"(String) Easy to read owner information \"\"\" if isinstance(self.owner, Bank): owned_by = \"Owner:",
"if player will pass go if self.active_player.position >= tile_num: self.active_player.money += 200 self.active_player.position",
"calls that tiles landed_on method. :param num_tiles: (int) Number of tiles to be",
"amount) Board.spaces[20].exchange_money(self.active_player, amount) def get_out_of_jail_free(self): \"\"\" Gives player a get out of jail",
"\"\"\" Charges player house repairs \"\"\" owed_money = 0 for key in Board.spaces:",
"on the property, 0 - 5 Zero is No houses Five is a",
"---- New in PercentTax Class ---- :percent: percent to tax the player \"\"\"",
"class Property(Location): \"\"\" Defines all the Properties on the board Does not include",
"tile :is_mortgaged: (Boolean) mortgage state of the tile ---- New in Property Class",
"__init__(self, location='jail', name='jail'): super().__init__(location, name) def landed_on(self, player): pass def __str__(self): return \"This",
"return self.gain_money(50) elif key == 7: self.get_out_of_jail_free() elif key == 8: return self.go_back(3)",
"read in from a file :return: A tile to be added to the",
"class CommunityChest(Card): \"\"\" All Community Chest Cards Attributes: ---- From Card Class ----",
"Tile :param percent: (float or String) percent to tax the player \"\"\" self.percent",
"randint(1, 6) for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Utility): num_utils_owned += 1 self.owner.exchange_money(player,",
"not like to buy \"\"\" buy_or_pass = get_yes_or_no_input( \"Would you like to buy",
"def pay_all_other_players(self, amount): \"\"\" Active player pays all other players specified amount :param",
"collect $200 \"\"\" print(\"Oh No! you've been sent to jail!!\") self.active_player.position = 'jail'",
"= get_yes_or_no_input( \"Would you like to buy \" + self.name + \" for",
"{position: FreeParking(position)} elif class_type == \"Go\": return {0: Go()} elif class_type == 'JustVisiting':",
":name: (String) Name of the location :active_player: (Player Object) Player that the card",
"break except IndexError: data = None class SetTax(Effect): \"\"\" Charges player a set",
"CommunityChest(x, \"Community Chest Card\") for x in [2, 17, 33]} free_parking = {20:",
"not isinstance( Board.spaces[location_to_check], class_type): location_to_check += 1 if location_to_check > 39: location_to_check =",
"(String) Formatted __str__ method for all objects in spaces \"\"\" output = ''",
"amount): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly board",
"of the tile \"\"\" def __init__(self, location, name, price=200): \"\"\" :param location: (int)",
"class Jail(CornerTile): def __init__(self, location='jail', name='jail'): super().__init__(location, name) def landed_on(self, player): pass def",
"card \"\"\" print(\"You got a get out of jail free card\", \"\\n\\t you",
"== 'GoToJail': return {30: GoToJail()} elif class_type == 'jail': return {'jail': Jail()} else:",
"== 11: return self.lose_money(50) elif key == 12: return self.lose_money(150) elif key ==",
"set tax amount, is not dependant on the player's wealth Attributes: ---- From",
"in [5, 15, 25, 35]} utilities = {x: Utility(x, \"Name\") for x in",
"= output + \"\\n\" + cls.spaces[key].__str__() return output # construct the default board",
"Board.spaces[tile_num], \"\\n\\tTile Number:\", tile_num) return Board.spaces[self.active_player.position].landed_on(self.active_player) def advance_to_next(self, class_type): \"\"\" Advances active player",
"self.active_player.position) return Board.spaces[self.active_player.position].landed_on() def go_to_jail(self): \"\"\" Sends the player to jail, player does",
"PercentTax Class ---- :percent: percent to tax the player \"\"\" def __init__(self, location,",
"self.gain_money(20) elif key == 9: return self.get_money_from_all_other_players(10) elif key == 10: return self.gain_money(100)",
"11: return self.lose_money(50) elif key == 12: return self.lose_money(150) elif key == 13:",
"\"SetTax\": return {position: SetTax(position, name, data[0])} elif class_type == \"PercentTax\": return {position: PercentTax(position,",
"owner mortgage value (1/2 price), Sets price to 1/2 price, Sets owner to",
"Not Found!\") break except IndexError: data = None class SetTax(Effect): \"\"\" Charges player",
"True, Gives owner mortgage value (1/2 price), Sets price to 1/2 price, Sets",
"\"\"\" self.percent = float(percent) super().__init__(location, name) def landed_on(self, player): \"\"\" Charges player percent",
"\"\"\" def __init__(self, location, name, property_data): \"\"\" :param location: (Int) position on the",
"Models GO Tile Attributes: ---- From CornerTile Class ---- :location: (int) position, (0",
"\"\"\" Sets Active player to player, then calls draw_card() :param player: (Player Object)",
"elif class_type == \"Chance\": return {position: Chance(position, name)} elif class_type == \"CommunityChest\": return",
"tile is purchased, transfers money, updates owner, and sets is_mortgaged to False :param",
"that landed on the tile \"\"\" buy_or_pass = self.ask_buy_or_pass() if buy_or_pass: # buy",
"class Chance(Card): \"\"\" All Chance Cards Attributes: ---- From Card Class ---- :location:",
"self.advance_to_tile(24) elif key == 2: return self.advance_to_tile(11) elif key == 3: return self.advance_to_next(Utility)",
"spaces \"\"\" output = '' for key in cls.spaces: output = output +",
"the location :active_player: (Player Object) Player that the card will be affecting \"\"\"",
"elif location > 20: return 150 elif location > 10: return 100 else:",
"Board.spaces[location_to_check], class_type): location_to_check += 1 if location_to_check > 39: location_to_check = location_to_check %",
"name) def draw_card(self): \"\"\" Chooses a random random card and calls the appropriate",
"def lose_money(self, amount): \"\"\" Takes player's money :param amount: (int) amount of money",
"return {30: GoToJail()} elif class_type == 'jail': return {'jail': Jail()} else: raise TilesClassNotFoundError",
"get_yes_or_no_input('Would You Like To Use The Standard Board?'): file_name = 'StandardBoard' else: file_name",
"board :name: (String) Name of the location :price: (int) purchase cost of the",
"== 4: return self.advance_to_next(Railroad) elif key == 5: return self.advance_to_next(Railroad) elif key ==",
"of the Tile \"\"\" super().__init__(location, name) def landed_on(self, player): print(\"Landed on Go!\") class",
"self.get_money_from_all_other_players(10) elif key == 10: return self.gain_money(100) elif key == 11: return self.lose_money(50)",
"def __init__(self, location=30, name='Go To Jail'): super().__init__(location, name) def landed_on(self, player): player.position =",
"0: return self.advance_to_tile(0) elif key == 1: return self.gain_money(200) elif key == 2:",
"Income tax, etc. Attributes: :location: (int) position, (0 - 39), on the monopoly",
"pass go and does not collect $200 \"\"\" print(\"Oh No! you've been sent",
"---- :amount: Amount to tax the player \"\"\" def __init__(self, location, name, amount):",
"on the data provided :param data: Data read in from a file :return:",
"Easy to read tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax Amount:",
"== 1: return self.advance_to_tile(24) elif key == 2: return self.advance_to_tile(11) elif key ==",
"elif key == 5: return self.advance_to_next(Railroad) elif key == 6: return self.gain_money(50) elif",
"back \"\"\" self.active_player.position -= num_tiles print(\"You've been sent back \", num_tiles, \"tiles.\", \"\\nYou're",
"100, 3: 150, 4: 200} for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Railroad): num_railroads_owned",
"cost of the tile \"\"\" self.location = location self.name = name self.price =",
"tile number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on() def go_to_jail(self): \"\"\" Sends the player to",
"16) if key == 0: return self.advance_to_tile(0) elif key == 1: return self.advance_to_tile(24)",
"Card\") for x in [2, 17, 33]} free_parking = {20: FreeParking()} cls.spaces.update(streets) cls.spaces.update(railroads)",
"def landed_on(self, player): pass def __str__(self): \"\"\" :return: (String) Description of the tile",
"= [int(x) for x in data[3:]] except ValueError: last_part_data = [int(x) for x",
"super().__init__(location, name) def landed_on(self, player): print(\"Landed on Go!\") class JustVisiting(CornerTile): \"\"\" Models Just",
"on the monopoly board :name: (String) Name of the location :price: (int) purchase",
">= tile_num: self.active_player.money += 200 self.active_player.position = tile_num print(\"You've been moved to :\",",
"..., rent_hotel] \"\"\" self.color = property_data[0] self.rent = property_data[2:] self.number_of_houses = 0 self.cost_per_house",
"All Community Chest Cards Attributes: ---- From Card Class ---- :location: (int) position,",
"of the tile \"\"\" self.location = location self.name = name self.price = price",
"Gives the player the option to purchase the tile, if the tile is",
"for all objects in spaces \"\"\" output = '' for key in cls.spaces:",
"Class ---- :location: (int) position, (0 - 39), on the monopoly board :name:",
"Object) Player that landed on tile \"\"\" num_railroads_owned = 0 cost = {1:",
"the monopoly board :param name: (String) Name of the Tile \"\"\" self.active_player =",
":return: (String) Easy to read tile description \"\"\" rent_tiers = '' for tier",
"else: raise TilesClassNotFoundError except TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass Type: \", data[1], \" Not Found!\") break",
"percent of their total wealth and gives it to free parking :param player:",
"$\", amount) Board.spaces[20].exchange_money(self.active_player, amount) def get_out_of_jail_free(self): \"\"\" Gives player a get out of",
"of the Tile \"\"\" super().__init__(location, name) def landed_on(self, player): # TODO: find a",
"key == 16: return self.gain_money(100) else: return print(\"Bad Chance Card Draw\") class CommunityChest(Card):",
"(String) Name of the Tile :param amount: (int) amount to tax the player",
"player does not pass go and does not collect $200 \"\"\" print(\"Oh No!",
"key == 1: return self.gain_money(200) elif key == 2: return self.lose_money(50) elif key",
"= 0 self.cost_per_house = self.set_cost_per_house(location) super().__init__(location, name, int(property_data[1])) @staticmethod def set_cost_per_house(location): \"\"\" Determines",
"monopoly board :name: (String) Name of the location :active_player: (Player Object) Player that",
"def landed_on(self, player): player.position = 'jail' print(\"Go To Jail!!!\") class Jail(CornerTile): def __init__(self,",
"what players are in jail print(\"Just Visiting Jail\") class GoToJail(CornerTile): \"\"\" Class that",
"\"\"\" Formats Current rent for __str__ :return: (String) Current Rent \"\"\" return str(self.rent[self.number_of_houses])",
"of specified class type :param class_type: (Object) class of tile to advance to",
"type :param class_type: (Object) class of tile to advance to examples: Railroad, Utility,",
"float(percent) super().__init__(location, name) def landed_on(self, player): \"\"\" Charges player percent of their total",
"self.gain_money(50) elif key == 4: return self.get_out_of_jail_free() elif key == 5: return self.go_to_jail()",
"players print(\"Lucky for {} I don't know how to make you pay everyone",
"= float(percent) super().__init__(location, name) def landed_on(self, player): \"\"\" Charges player percent of their",
"39), on the monopoly board :name: (String) Name of the location :price: (int)",
"tile \"\"\" def __init__(self, location, name, price=150): \"\"\" :param location: (int) Location, (0",
"the location ---- New In Card Class ---- :active_player: (Player Object) Player that",
"description \"\"\" output = \"{0} {1}\" \\ \"\\n\\t{2}\".format(self.location, self.name, self.format_owner()) return output class",
"monopoly board :param name: (String) Name of the Tile \"\"\" super().__init__(location, name) def",
"4: return self.get_out_of_jail_free() elif key == 5: return self.go_to_jail() elif key == 6:",
"import Player, Bank, FreeParking from InputValidation import get_yes_or_no_input from random import randint from",
"key == 3: return self.advance_to_next(Utility) elif key == 4: return self.advance_to_next(Railroad) elif key",
"def __str__(self): \"\"\" :return: (String) Description of the tile \"\"\" output = \"{0}",
"free card \"\"\" print(\"You got a get out of jail free card\", \"\\n\\t",
"money to take from active player \"\"\" print(\"You've lost $\", amount) Board.spaces[20].exchange_money(self.active_player, amount)",
"== \"Railroad\": return {position: Railroad(position, name)} elif class_type == \"Chance\": return {position: Chance(position,",
"of the Tile :param price: (Optional, int, default=150) purchase cost of the tile",
"create_tile(data): \"\"\" Creates a tile based on the data provided :param data: Data",
"To Use The Standard Board?'): file_name = 'StandardBoard' else: file_name = input(\"Please enter",
"Card \"\"\" location_to_check = self.active_player.position + 1 passed_go = False while not isinstance(",
"a file :return: A tile to be added to the board \"\"\" while",
"= \"{0} {1}\" \\ \"\\n\\t{2}\".format(self.location, self.name, self.format_owner()) return output class Property(Location): \"\"\" Defines",
"+ str(self.price) + \"? y/n\") return buy_or_pass def mortgage(self): \"\"\" Sets is_mortgaged to",
"on the board Attributes: :location: (int) position, (0 - 39), on the monopoly",
"for line in file: if not line.startswith('#'): data = line.split() new_tile = TileFactory.create_tile(data)",
"pass print(\"House repairs are expensive!\") if owed_money == 0: print(\"Lucky for you, you",
"150 elif location > 10: return 100 else: return 50 def __str__(self): \"\"\"",
"[\"Color\", 150, 5, 10, 20, 40, 80, 160]) for x in range(0, 40)}",
"return self.lose_money(50) elif key == 3: return self.gain_money(50) elif key == 4: return",
"that landed on tile \"\"\" self.owner.exchange_money(player, self.rent[self.number_of_houses]) def ask_buy_or_pass(self): \"\"\" Asks the player",
"like to buy False if the player would not like to buy \"\"\"",
"amount of money to take from active player \"\"\" print(\"You've lost $\", amount)",
"\"\"\" super().__init__(location, name) def landed_on(self, player): # TODO: find a way to print",
"2: 100, 3: 150, 4: 200} for key in self.owner.owned_properites: if isinstance(self.owner.owned_properites[key], Railroad):",
"key == 10: return self.gain_money(100) elif key == 11: return self.lose_money(50) elif key",
"tile, if the tile is purchased, transfers money, updates owner, and sets is_mortgaged",
"would not like to buy \"\"\" buy_or_pass = get_yes_or_no_input( \"Would you like to",
"\"\"\" print(\"You've lost $\", amount) Board.spaces[20].exchange_money(self.active_player, amount) def get_out_of_jail_free(self): \"\"\" Gives player a",
"__init__(self, location=30, name='Go To Jail'): super().__init__(location, name) def landed_on(self, player): player.position = 'jail'",
"15, 25, 35]} utilities = {x: Utility(x, \"Name\") for x in [12, 28]}",
"location \"\"\" def __init__(self, location=0, name='GO'): \"\"\" :param location: (int) Location, (0 -",
"on Go!\") class JustVisiting(CornerTile): \"\"\" Models Just Visiting (jail) tile Attributes: ---- From",
"file: for line in file: if not line.startswith('#'): data = line.split() new_tile =",
"hold except AttributeError: # Corner Tiles have no attribute owner, skipped pass print(\"House",
":owner: (UserEntity Object) Current Owner of the tile :is_mortgaged: (Boolean) mortgage state of",
"read_in_board in Board \"\"\" @staticmethod def create_tile(data): \"\"\" Creates a tile based on",
"Visiting (jail) tile Attributes: ---- From CornerTile Class ---- :location: (int) position, (0",
"you pay everyone else... yet\".format(self.active_player)) def get_money_from_all_other_players(self, amount): \"\"\" Active player gets money",
"for __str__ :return: (String) Current Rent \"\"\" return str(self.rent[self.number_of_houses]) class Utility(Location): \"\"\" Defines",
"to read tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax Amount: ${2}\"\\",
"def get_money_from_all_other_players(self, amount): \"\"\" Active player gets money from all other players :param",
"landed_on(self, player): \"\"\" Takes amount from player and adds it to Free Parking",
"you have no houses\") else: print(\"You paid: $\", owed_money) Board.spaces[20].exchange_money(self.active_player, owed_money) def pay_all_other_players(self,",
"return self.advance_to_tile(39) elif key == 14: return self.pay_all_other_players(50) elif key == 15: return",
"key == 7: self.get_out_of_jail_free() elif key == 8: return self.go_back(3) elif key ==",
"Easy to read tile description \"\"\" output = \"{0} {1}\".format(self.location, self.name) return output",
"try: if data is not None: position = int(data[0]) class_type = data[1] name",
"---- From Card Class ---- :location: (int) position, (0 - 39), on the",
"else: print(\"bad CC draw\") class Board(object): \"\"\" The Monopoly Board Attributes: :spaces: (Dict)",
"= Bank self.is_mortgaged = False super().__init__() def landed_on(self, player): \"\"\" Calls the proper",
"{'jail': Jail()} else: raise TilesClassNotFoundError except TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass Type: \", data[1], \" Not",
"== \"Q\": quit() print(\"File Not found, please try again.\\n\\tOr Enter Q to quit\\n\")",
"Property(position, name, data)} elif class_type == \"Utility\": return {position: Utility(position, name)} elif class_type",
"= location_to_check % 40 passed_go = True self.active_player.position = location_to_check if passed_go: self.active_player.money",
"of the location \"\"\" def __init__(self, location=10, name=\"JustVisiting\"): \"\"\" :param location: (int) Location,",
"no attribute owner, skipped pass print(\"House repairs are expensive!\") if owed_money == 0:",
"if self.owner == Bank: self.owned_by_bank(player) elif self.owner != player: self.owned_by_player(player) def owned_by_bank(self, player):",
"rent, transfers rent between owner and player :param player: (Player Object) Player that",
"\"\\n\\tTax Amount: ${2}\"\\ .format(self.location, self.name, self.amount) return output class PercentTax(Effect): \"\"\" Charges player",
"go and does not collect $200 \"\"\" print(\"Oh No! you've been sent to",
"in the file should be formatted as follows: Square# ClassType class data \"\"\"",
"num_tiles: (int) Number of tiles to be moved back \"\"\" self.active_player.position -= num_tiles",
"class Card(Effect): \"\"\" Parent Class for Chance and Community Chest Cards Attributes: ----",
"tile \"\"\" self.location = location self.name = name self.price = price self.owner =",
"monopoly board :name: (String) Name of the location ---- New in SetTax Class",
"= {x: Utility(x, \"Name\") for x in [12, 28]} chances = {x: Chance(x,",
"True: try: if data is not None: position = int(data[0]) class_type = data[1]",
"(String) Name of the Tile \"\"\" self.location = location self.name = name @abstractmethod",
"return {position: PercentTax(position, name, data[0])} elif class_type == \"FreeParking\": return {position: FreeParking(position)} elif",
"and Community Chest Cards Attributes: ---- From Effect Class ---- :location: (int) position,",
"player): \"\"\" Sets is_mortgaged to False, Sets price to full price Sets owner",
"(int) amount of money to take from active player \"\"\" print(\"You've lost $\",",
"mortgage state of the tile \"\"\" def __init__(self, location, name, price=150): \"\"\" :param",
"elif class_type == 'jail': return {'jail': Jail()} else: raise TilesClassNotFoundError except TilesClassNotFoundError: print(\"\\n\\nError!!\\n\\tClass",
":param num_tiles: (int) Number of tiles to be moved back \"\"\" self.active_player.position -=",
"the monopoly board :param name: (Optional, String, default=JustVisiting) Name of the Tile \"\"\"",
"self.active_player.position = tile_num print(\"You've been moved to :\", Board.spaces[tile_num], \"\\n\\tTile Number:\", tile_num) return",
":name: (String) Name of the location \"\"\" @abstractmethod def __init__(self, location, name): \"\"\"",
"12: return self.lose_money(150) elif key == 13: return self.gain_money(25) elif key == 14:",
"monopoly board :param name: (String) Name of the Tile :param percent: (float or",
"cost = {1: 50, 2: 100, 3: 150, 4: 200} for key in",
"of the tile :owner: (UserEntity Object) Current Owner of the tile :is_mortgaged: (Boolean)",
"= randint(0, 16) if key == 0: return self.advance_to_tile(0) elif key == 1:",
"to the next \", str(class_type), \"\\n\\tTile Number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on(self.active_player) def gain_money(self,",
"print(\"Oh No! you've been sent to jail!!\") self.active_player.position = 'jail' def house_repairs(self): \"\"\"",
"array-like) Rent tiers for the property :number_of_houses: (int) Number of houses on the",
"return self.advance_to_tile(24) elif key == 2: return self.advance_to_tile(11) elif key == 3: return",
"Current rent for __str__ :return: (String) Current Rent \"\"\" return str(self.rent[self.number_of_houses]) class Utility(Location):",
"from other players \"\"\" amount = amount * -1 self.pay_all_other_players(amount) def __str__(self): \"\"\"",
"houses\") else: print(\"You paid: $\", owed_money) Board.spaces[20].exchange_money(self.active_player, owed_money) def pay_all_other_players(self, amount): \"\"\" Active",
"String, default=GO) Name of the Tile \"\"\" super().__init__(location, name) def landed_on(self, player): print(\"Landed",
"Card(Effect): \"\"\" Parent Class for Chance and Community Chest Cards Attributes: ---- From",
"\"tiles.\", \"\\nYou're now on tile number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on() def go_to_jail(self): \"\"\"",
"a hotel :cost_per_house: (int) Price of one house \"\"\" def __init__(self, location, name,",
"\"\"\" def __init__(self, location, name, amount): \"\"\" :param location: (int) Location, (0 -",
"the tile \"\"\" def __init__(self, location, name, price=200): \"\"\" :param location: (int) Location,",
"for you, you have no houses\") else: print(\"You paid: $\", owed_money) Board.spaces[20].exchange_money(self.active_player, owed_money)",
"key == 12: return self.advance_to_tile(5) elif key == 13: return self.advance_to_tile(39) elif key",
"from all other players :param amount: (int) amount gotten from other players \"\"\"",
":is_mortgaged: (Boolean) mortgage state of the tile ---- New in Property Class ----",
"self.lose_money(15) elif key == 12: return self.advance_to_tile(5) elif key == 13: return self.advance_to_tile(39)",
"of the Tile :param price: (Optional, int, default=200) purchase cost of the tile",
"price=200): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly board",
"self.pay_all_other_players(50) elif key == 15: return self.gain_money(150) elif key == 16: return self.gain_money(100)",
"price), Sets price to 1/2 price, Sets owner to Bank, \"\"\" self.is_mortgaged =",
"Object) Current Owner of the tile :is_mortgaged: (Boolean) mortgage state of the tile",
"self.active_player: hold = Board.spaces[key].number_of_houses owed_money += 25 * hold except AttributeError: # Corner",
"line in the file should be formatted as follows: Square# ClassType class data",
"tiles to be moved back \"\"\" self.active_player.position -= num_tiles print(\"You've been sent back",
"landed_on method. :param num_tiles: (int) Number of tiles to be moved back \"\"\"",
"Sets owner to Bank, \"\"\" self.is_mortgaged = True self.price = self.price / 2",
"to specified tile and calls that tile's landed_on method :param tile_num: (int) Tile",
"and does not collect $200 \"\"\" print(\"Oh No! you've been sent to jail!!\")",
"tax the player \"\"\" def __init__(self, location, name, amount): \"\"\" :param location: (int)",
"who owns the property :param player: (Player Object) The player that landed on",
"def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\" output =",
"self.active_player.position = 'jail' def house_repairs(self): \"\"\" Charges player house repairs \"\"\" owed_money =",
"key == 13: return self.advance_to_tile(39) elif key == 14: return self.pay_all_other_players(50) elif key",
":param amount: (int) amount to tax the player \"\"\" self.amount = int(amount) super().__init__(location,",
"the tile :owner: (UserEntity Object) Current Owner of the tile :is_mortgaged: (Boolean) mortgage",
"location=10, name=\"JustVisiting\"): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly",
"expensive!\") if owed_money == 0: print(\"Lucky for you, you have no houses\") else:",
"would like to purchase the property, displays the Name and price :return: (Boolean)",
"Object) Player that landed on tile \"\"\" self.owner.exchange_money(player, self.rent[self.number_of_houses]) def ask_buy_or_pass(self): \"\"\" Asks",
"= {20: FreeParking()} cls.spaces.update(streets) cls.spaces.update(railroads) cls.spaces.update(utilities) cls.spaces.update(chances) cls.spaces.update(community_chest) cls.spaces.update(free_parking) @classmethod def read_in_board(cls): \"\"\"",
"the location of a tile and the content is the property \"\"\" spaces",
"elif class_type == \"Utility\": return {position: Utility(position, name)} elif class_type == \"Railroad\": return",
"name: (String) Name of the Tile \"\"\" super().__init__(location, name) def draw_card(self): \"\"\" Chooses",
"\"\"\" self.active_player.position -= num_tiles print(\"You've been sent back \", num_tiles, \"tiles.\", \"\\nYou're now",
"like to purchase the property, displays the Name and price :return: (Boolean) True",
"print(\"Landed on Go!\") class JustVisiting(CornerTile): \"\"\" Models Just Visiting (jail) tile Attributes: ----",
"= 0 multiplier = {1: 4, 2: 10} roll = randint(1, 6) for",
"found, please try again.\\n\\tOr Enter Q to quit\\n\") @classmethod def __str__(cls): \"\"\" :return:",
"Asks the player if they would like to purchase the property, displays the",
"Exceptions import TilesClassNotFoundError class Location(ABC): \"\"\" Abstract Parent Class for all locations on",
"Tile \"\"\" self.location = location self.name = name @abstractmethod def landed_on(self, player): pass",
"+ \" for $\" + str(self.price) + \"? y/n\") return buy_or_pass def mortgage(self):",
"if location > 30: return 200 elif location > 20: return 150 elif",
"Name of the Tile :param amount: (int) amount to tax the player \"\"\"",
"Jail'): super().__init__(location, name) def landed_on(self, player): player.position = 'jail' print(\"Go To Jail!!!\") class",
"Object) Player that landed on card tile :return: calls draw_card \"\"\" self.active_player =",
"self.owned_by_player(player) def owned_by_bank(self, player): \"\"\" Gives the player the option to purchase the",
"\"\"\" self.location = location self.name = name @abstractmethod def landed_on(self, player): pass class",
"(String) Name of the location :active_player: (Player Object) Player that the card will",
"the tile :is_mortgaged: (Boolean) mortgage state of the tile ---- New in Property",
"self.name + \" for $\" + str(self.price) + \"? y/n\") return buy_or_pass def",
"\"\"\" :return: (String) Easy to read tile description \"\"\" output = \"{0} {1}\"",
"the corner tiles Excluding Free Parking. Attributes: :location: (int) position, (0 - 39),",
"pass go if self.active_player.position >= tile_num: self.active_player.money += 200 self.active_player.position = tile_num print(\"You've",
"monopoly board :name: (String) Name of the location :price: (int) purchase cost of",
"== \"Property\": return {position: Property(position, name, data)} elif class_type == \"Utility\": return {position:",
"if the player would like to buy False if the player would not",
"self.is_mortgaged = False super().__init__() def landed_on(self, player): \"\"\" Calls the proper function depending",
"board :name: (String) Name of the location ---- New in SetTax Class ----",
"the Tile \"\"\" super().__init__(location, name) def landed_on(self, player): print(\"Landed on Go!\") class JustVisiting(CornerTile):",
"FreeParking from InputValidation import get_yes_or_no_input from random import randint from Exceptions import TilesClassNotFoundError",
"default=150) purchase cost of the tile \"\"\" super().__init__(location, name, price) def owned_by_player(self, player):",
"16) if key == 0: return self.advance_to_tile(0) elif key == 1: return self.gain_money(200)",
"range(0, 40)} railroads = {x: Railroad(x, \"Name\") for x in [5, 15, 25,",
"Board \"\"\" @staticmethod def create_tile(data): \"\"\" Creates a tile based on the data",
"def landed_on(self, player): \"\"\" Charges player percent of their total wealth and gives",
"people to jail \"\"\" def __init__(self, location=30, name='Go To Jail'): super().__init__(location, name) def",
":param price: (Optional, int, default=200) purchase cost of the tile \"\"\" super().__init__(location, name,",
"player: (Player Object) Player that landed on tile \"\"\" self.owner.exchange_money(player, self.rent[self.number_of_houses]) def ask_buy_or_pass(self):",
"__str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\" rent_tiers = ''",
"board \"\"\" while True: try: if data is not None: position = int(data[0])",
"int, default=150) purchase cost of the tile \"\"\" super().__init__(location, name, price) def owned_by_player(self,",
"(0 - 39) on the monopoly board :param name: (String) Name of the",
"a tile and the content is the property \"\"\" spaces = {} @classmethod",
"---- :color: (String) Color of the property :rent: (1x6 array-like) Rent tiers for",
"name, data)} elif class_type == \"Utility\": return {position: Utility(position, name)} elif class_type ==",
"\"\"\" Abstract Parent Class for all locations on the board Attributes: :location: (int)",
"location :active_player: (Player Object) Player that the card will be affecting \"\"\" def",
"does not pass go and does not collect $200 \"\"\" print(\"Oh No! you've",
"try: if get_yes_or_no_input('Would You Like To Use The Standard Board?'): file_name = 'StandardBoard'",
"To Jail!!!\") class Jail(CornerTile): def __init__(self, location='jail', name='jail'): super().__init__(location, name) def landed_on(self, player):",
":param amount: (int) amount to pay other players \"\"\" # TODO: implement pay",
"sent back \", num_tiles, \"tiles.\", \"\\nYou're now on tile number: \", self.active_player.position) return",
"find a way to print out what players are in jail print(\"Just Visiting",
"on the property and who owns the property :param player: (Player Object) The",
"__str__ :return: (String) Current Rent \"\"\" return str(self.rent[self.number_of_houses]) class Utility(Location): \"\"\" Defines all",
"\"\"\" Give player money :param amount: (int) Amount of money to give active",
"house repairs \"\"\" owed_money = 0 for key in Board.spaces: try: if Board.spaces[key].owner",
"self.is_mortgaged = False self.price = self.price * 2 def owned_by_player(self, player): \"\"\" Charges",
"self.active_player.position >= tile_num: self.active_player.money += 200 self.active_player.position = tile_num print(\"You've been moved to",
"= int(amount) super().__init__(location, name) def landed_on(self, player): \"\"\" Takes amount from player and",
"Tiers {6}\"\\ .format(self.location, self.name, self.color, owned_by, self.cost_per_house, self.number_of_houses, rent_tiers) return output def format_current_rent(self):",
"output = \"{0} {1}\".format(self.location, self.name) return output class Chance(Card): \"\"\" All Chance Cards",
"on tile \"\"\" self.owner.exchange_money(player, self.rent[self.number_of_houses]) def ask_buy_or_pass(self): \"\"\" Asks the player if they",
"player :param player: (Player Object) Player that landed on tile \"\"\" num_railroads_owned =",
"location_to_check = self.active_player.position + 1 passed_go = False while not isinstance( Board.spaces[location_to_check], class_type):",
"the location \"\"\" def __init__(self, location=10, name=\"JustVisiting\"): \"\"\" :param location: (int) Location, (0",
"tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax percent: {2}%\"\\ .format(self.location, self.name,",
"\"\"\" def __init__(self, location=0, name='GO'): \"\"\" :param location: (int) Location, (0 - 39)",
"of the Tile \"\"\" self.active_player = None super().__init__(location, name) def landed_on(self, player): \"\"\"",
"player a get out of jail free card \"\"\" print(\"You got a get",
"2: return self.advance_to_tile(11) elif key == 3: return self.advance_to_next(Utility) elif key == 4:",
"the property :param player: (Player Object) The player that landed on the tile",
"{0: Go()} elif class_type == 'JustVisiting': return {10: JustVisiting()} elif class_type == 'GoToJail':",
"on tile number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on() def go_to_jail(self): \"\"\" Sends the player",
"not None: position = int(data[0]) class_type = data[1] name = data[2] try: data",
"PercentTax(position, name, data[0])} elif class_type == \"FreeParking\": return {position: FreeParking(position)} elif class_type ==",
"elif key == 8: return self.go_back(3) elif key == 9: return self.go_to_jail() elif",
"39) on the monopoly board :param name: (String) Name of the Tile \"\"\"",
"Player that the card will be affecting \"\"\" def __init__(self, location, name): \"\"\"",
"Board.spaces[20].exchange_money(player, self.amount) def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\"",
"\"\\n\\tTile Number:\", tile_num) return Board.spaces[self.active_player.position].landed_on(self.active_player) def advance_to_next(self, class_type): \"\"\" Advances active player to",
"def get_out_of_jail_free(self): \"\"\" Gives player a get out of jail free card \"\"\"",
"isinstance(self.owner, Bank): owned_by = \"Owner: {0}, Current Rent {1}\" \\ .format(self.owner, self.format_current_rent()) else:",
"return self.gain_money(100) elif key == 11: return self.lose_money(50) elif key == 12: return",
"buy \" + self.name + \" for $\" + str(self.price) + \"? y/n\")",
"2 Bank.exchange_money(self.owner, self.price) self.owner = Bank def unmortgage(self, player): \"\"\" Sets is_mortgaged to",
"name) def landed_on(self, player): # TODO: find a way to print out what",
"\"\"\" owed_money = 0 for key in Board.spaces: try: if Board.spaces[key].owner == self.active_player:",
"location :price: (int) purchase cost of the tile :owner: (UserEntity Object) Current Owner",
"testing \"\"\" cls.spaces = {} streets = {x: Property(x, \"Name\", [\"Color\", 150, 5,",
"default=GO) Name of the Tile \"\"\" super().__init__(location, name) def landed_on(self, player): print(\"Landed on",
"free card\", \"\\n\\t you now have \", self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards += 1 def go_back(self,",
"name: (String) Name of the Tile :param price: (Optional, int, default=200) purchase cost",
"\"\"\" if self.owner == Bank: self.owned_by_bank(player) elif self.owner != player: self.owned_by_player(player) def owned_by_bank(self,",
"- 39), on the monopoly board :name: (String) Name of the location \"\"\"",
"the Tile :param percent: (float or String) percent to tax the player \"\"\"",
"Charges player house repairs \"\"\" owed_money = 0 for key in Board.spaces: try:",
"(int) Number of houses on the property, 0 - 5 Zero is No",
"Tile \"\"\" super().__init__(location, name) def landed_on(self, player): print(\"Landed on Go!\") class JustVisiting(CornerTile): \"\"\"",
"applied. Including Chance, Community Chest, Income tax, etc. Attributes: :location: (int) position, (0",
"name)} elif class_type == \"Railroad\": return {position: Railroad(position, name)} elif class_type == \"Chance\":",
"\"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax percent: {2}%\"\\ .format(self.location, self.name, self.percent) class",
"money, updates owner, and sets is_mortgaged to False :param player: (Player Object) Player",
"location: (Int) position on the board, int from 0 to 39 :param property_data:",
"{position: Utility(position, name)} elif class_type == \"Railroad\": return {position: Railroad(position, name)} elif class_type",
"tile description \"\"\" rent_tiers = '' for tier in self.rent: rent_tiers += str(tier)",
"# -------------Card effects -------------- def advance_to_tile(self, tile_num): \"\"\" Moves player to specified tile",
"print(\"You've been moved to :\", Board.spaces[tile_num], \"\\n\\tTile Number:\", tile_num) return Board.spaces[self.active_player.position].landed_on(self.active_player) def advance_to_next(self,",
"= {x: CommunityChest(x, \"Community Chest Card\") for x in [2, 17, 33]} free_parking",
"(Dict) A dictionary where the key is the location of a tile and",
"not pass go and does not collect $200 \"\"\" print(\"Oh No! you've been",
"self.rent: rent_tiers += str(tier) + ', ' owned_by = self.format_owner() output = \"{0}",
"Board.spaces[20].exchange_money(self.active_player, owed_money) def pay_all_other_players(self, amount): \"\"\" Active player pays all other players specified",
"def __str__(cls): \"\"\" :return: (String) Formatted __str__ method for all objects in spaces",
"= data[1] name = data[2] try: data = [int(x) for x in data[3:]]",
"Parent Class For Each of the corner tiles Excluding Free Parking. Attributes: :location:",
"player :param player: (Player Object) Player that landed on tile \"\"\" num_utils_owned =",
"== 0: return self.advance_to_tile(0) elif key == 1: return self.advance_to_tile(24) elif key ==",
"in self.rent: rent_tiers += str(tier) + ', ' owned_by = self.format_owner() output =",
"read_in_board(cls): \"\"\" read in a board from file. Each line in the file",
"return {position: Chance(position, name)} elif class_type == \"CommunityChest\": return {position: CommunityChest(position, name)} elif",
"file_name) as file: for line in file: if not line.startswith('#'): data = line.split()",
"== 16: return self.gain_money(100) else: return print(\"Bad Chance Card Draw\") class CommunityChest(Card): \"\"\"",
"TileFactory: \"\"\" Creates all possible different tiles, used with read_in_board in Board \"\"\"",
"to buy False if the player would not like to buy \"\"\" buy_or_pass",
"Description of the tile \"\"\" output = \"{0} {1}\".format(self.location, self.name) return output class",
"---- New In Card Class ---- :active_player: (Player Object) Player that the card",
"percent to tax the player \"\"\" def __init__(self, location, name, percent): \"\"\" :param",
"(Player Object) Player that landed on tile \"\"\" self.owner.exchange_money(player, self.rent[self.number_of_houses]) def ask_buy_or_pass(self): \"\"\"",
"(String) Name of the Tile \"\"\" self.active_player = None super().__init__(location, name) def landed_on(self,",
"\\ \"\\n\\tTax Amount: ${2}\"\\ .format(self.location, self.name, self.amount) return output class PercentTax(Effect): \"\"\" Charges",
"key == 16: return self.gain_money(100) else: print(\"bad CC draw\") class Board(object): \"\"\" The",
"method \"\"\" key = randint(0, 16) if key == 0: return self.advance_to_tile(0) elif",
"advance to examples: Railroad, Utility, Card \"\"\" location_to_check = self.active_player.position + 1 passed_go",
"Railroad, Utility, Card \"\"\" location_to_check = self.active_player.position + 1 passed_go = False while",
"the tile ---- New in Property Class ---- :color: (String) Color of the",
"to read owner information \"\"\" if isinstance(self.owner, Bank): owned_by = \"Owner: {0}, Current",
"39), on the monopoly board :name: (String) Name of the location ---- New",
"if not line.startswith('#'): data = line.split() new_tile = TileFactory.create_tile(data) cls.spaces.update(new_tile) loop_value = False",
"Name of the location \"\"\" def __init__(self, location=10, name=\"JustVisiting\"): \"\"\" :param location: (int)",
":return: (String) Formatted __str__ method for all objects in spaces \"\"\" output =",
"the monopoly board :param name: (String) Name of the Tile :param percent: (float",
"\"Go\": return {0: Go()} elif class_type == 'JustVisiting': return {10: JustVisiting()} elif class_type",
"Percent Tax \"\"\" Board.spaces[20].exchange_money(player, player.money * self.percent) def __str__(self): \"\"\" :return: (String) Easy",
"Calls the proper function depending on who landed on the property and who",
"not include railroads or utilities Attributes: ---- From Location Class ---- :location: (int)",
"rent_tiers = '' for tier in self.rent: rent_tiers += str(tier) + ', '",
"class SetTax(Effect): \"\"\" Charges player a set tax amount, is not dependant on",
"buy_or_pass = self.ask_buy_or_pass() if buy_or_pass: # buy player.money = player.money - self.price self.owner",
"' owned_by = self.format_owner() output = \"{0} {1} {2}\" \\ \"\\n\\t{3}\" \\ \"\\n\\tCost",
"0: return self.advance_to_tile(0) elif key == 1: return self.advance_to_tile(24) elif key == 2:",
"for tier in self.rent: rent_tiers += str(tier) + ', ' owned_by = self.format_owner()",
"read owner information \"\"\" if isinstance(self.owner, Bank): owned_by = \"Owner: {0}, Current Rent",
"== 12: return self.advance_to_tile(5) elif key == 13: return self.advance_to_tile(39) elif key ==",
"Free Parking. Attributes: :location: (int) position, (0 - 39), on the monopoly board",
"int(data[0]) class_type = data[1] name = data[2] try: data = [int(x) for x",
"examples: Railroad, Utility, Card \"\"\" location_to_check = self.active_player.position + 1 passed_go = False",
"Of Houses: {5}\" \\ \"\\n\\tRent Tiers {6}\"\\ .format(self.location, self.name, self.color, owned_by, self.cost_per_house, self.number_of_houses,",
"Location(ABC): \"\"\" Abstract Parent Class for all locations on the board Attributes: :location:",
"Board.spaces[key].owner == self.active_player: hold = Board.spaces[key].number_of_houses owed_money += 25 * hold except AttributeError:",
"cls.spaces.update(streets) cls.spaces.update(railroads) cls.spaces.update(utilities) cls.spaces.update(chances) cls.spaces.update(community_chest) cls.spaces.update(free_parking) @classmethod def read_in_board(cls): \"\"\" read in a",
"follows [\"Color\", Price, rent, rent_1_house, ..., rent_hotel] \"\"\" self.color = property_data[0] self.rent =",
"(Boolean) mortgage state of the tile \"\"\" def __init__(self, location, name, price=200): \"\"\"",
"on the monopoly board :name: (String) Name of the location ---- New In",
"for one house based on the location :param location: (int) location on the",
"while True: try: if data is not None: position = int(data[0]) class_type =",
"= True self.active_player.position = location_to_check if passed_go: self.active_player.money += 200 print(\"You've advanced to",
"Moves player back specified number of spaces and calls that tiles landed_on method.",
"name, data[0])} elif class_type == \"PercentTax\": return {position: PercentTax(position, name, data[0])} elif class_type",
"self.active_player.position = location_to_check if passed_go: self.active_player.money += 200 print(\"You've advanced to the next",
"+ self.name + \" for $\" + str(self.price) + \"? y/n\") return buy_or_pass",
"the monopoly board :name: (String) Name of the location :price: (int) purchase cost",
"\"\"\" cls.spaces = {} streets = {x: Property(x, \"Name\", [\"Color\", 150, 5, 10,",
"\"\"\" Creates all possible different tiles, used with read_in_board in Board \"\"\" @staticmethod",
"player): \"\"\" Calls the proper function depending on who landed on the property",
"active player will be moved to \"\"\" # Checks if player will pass",
":param player: (Player Object) Player that landed on the tile \"\"\" buy_or_pass =",
"\"\\n\\tTax percent: {2}%\"\\ .format(self.location, self.name, self.percent) class CornerTile(ABC): \"\"\" Parent Class For Each",
"draw\") class Board(object): \"\"\" The Monopoly Board Attributes: :spaces: (Dict) A dictionary where",
"the tile, if the tile is purchased, transfers money, updates owner, and sets",
"else: return 50 def __str__(self): \"\"\" :return: (String) Easy to read tile description",
"Amount: ${2}\"\\ .format(self.location, self.name, self.amount) return output class PercentTax(Effect): \"\"\" Charges player a",
"board :param name: (String) Name of the Tile \"\"\" super().__init__(location, name) def draw_card(self):",
"landed_on(self, player): \"\"\" Charges player percent of their total wealth and gives it",
"have \", self.active_player.get_out_of_jail_cards) self.active_player.get_out_of_jail_cards += 1 def go_back(self, num_tiles): \"\"\" Moves player back",
"(String) Name of the location \"\"\" def __init__(self, location=0, name='GO'): \"\"\" :param location:",
"Color of the property :rent: (1x6 array-like) Rent tiers for the property :number_of_houses:",
"landed_on(self, player): \"\"\" Calls the proper function depending on who landed on the",
"of tile to advance to examples: Railroad, Utility, Card \"\"\" location_to_check = self.active_player.position",
"file :return: A tile to be added to the board \"\"\" while True:",
"other players \"\"\" amount = amount * -1 self.pay_all_other_players(amount) def __str__(self): \"\"\" :return:",
"{1}\" \\ \"\\n\\tTax Amount: ${2}\"\\ .format(self.location, self.name, self.amount) return output class PercentTax(Effect): \"\"\"",
"self.amount) return output class PercentTax(Effect): \"\"\" Charges player a set tax amount, is",
"== 9: return self.go_to_jail() elif key == 10: return self.house_repairs() elif key ==",
"= property_data[2:] self.number_of_houses = 0 self.cost_per_house = self.set_cost_per_house(location) super().__init__(location, name, int(property_data[1])) @staticmethod def",
"to tax the player \"\"\" def __init__(self, location, name, amount): \"\"\" :param location:",
"and Water Works Attributes: ---- From Location Class ---- :location: (int) position, (0",
"Object) The player that landed on the tile \"\"\" if self.owner == Bank:",
"name @abstractmethod def landed_on(self, player): pass def __str__(self): \"\"\" :return: (String) Description of",
"\"Name\") for x in [12, 28]} chances = {x: Chance(x, \"Chance Card\") for",
"the Tile \"\"\" self.location = location self.name = name @abstractmethod def landed_on(self, player):",
"\"\"\" rent_tiers = '' for tier in self.rent: rent_tiers += str(tier) + ',",
"\", num_tiles, \"tiles.\", \"\\nYou're now on tile number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on() def",
":param name: (String) Name of the Tile :param amount: (int) amount to tax",
"Price of one house \"\"\" def __init__(self, location, name, property_data): \"\"\" :param location:",
"be added to the board \"\"\" while True: try: if data is not",
"the monopoly board :param name: (Optional, String, default=GO) Name of the Tile \"\"\"",
"{2}%\"\\ .format(self.location, self.name, self.percent) class CornerTile(ABC): \"\"\" Parent Class For Each of the",
"the Properties on the board Does not include railroads or utilities Attributes: ----",
"player will pass go if self.active_player.position >= tile_num: self.active_player.money += 200 self.active_player.position =",
"player gets money from all other players :param amount: (int) amount gotten from",
"for x in [12, 28]} chances = {x: Chance(x, \"Chance Card\") for x",
":\", Board.spaces[tile_num], \"\\n\\tTile Number:\", tile_num) return Board.spaces[self.active_player.position].landed_on(self.active_player) def advance_to_next(self, class_type): \"\"\" Advances active",
"\"\"\" :return: (String) Easy to read tile description \"\"\" rent_tiers = '' for",
"Sets is_mortgaged to False, Sets price to full price Sets owner to player",
"\"\\n\\tCost Per House: {4}, Number Of Houses: {5}\" \\ \"\\n\\tRent Tiers {6}\"\\ .format(self.location,",
"key == 11: return self.lose_money(15) elif key == 12: return self.advance_to_tile(5) elif key",
"\"\"\" Formats current owner information for __str__() :return: (String) Easy to read owner",
"False super().__init__() def landed_on(self, player): \"\"\" Calls the proper function depending on who",
"value (1/2 price), Sets price to 1/2 price, Sets owner to Bank, \"\"\"",
"= tile_num print(\"You've been moved to :\", Board.spaces[tile_num], \"\\n\\tTile Number:\", tile_num) return Board.spaces[self.active_player.position].landed_on(self.active_player)",
".format(self.owner, self.price, self.is_mortgaged) return owned_by def __str__(self): \"\"\" :return: (String) Easy to read",
"Owner of the tile :is_mortgaged: (Boolean) mortgage state of the tile ---- New",
"the Tile :param price: (Optional, int, default=200) purchase cost of the tile \"\"\"",
"elif key == 13: return self.gain_money(25) elif key == 14: return self.house_repairs() elif",
"(Player Object) Player that the card will be affecting \"\"\" def __init__(self, location,",
"Chance(position, name)} elif class_type == \"CommunityChest\": return {position: CommunityChest(position, name)} elif class_type ==",
"abstractmethod from UserEntity import Player, Bank, FreeParking from InputValidation import get_yes_or_no_input from random",
"Name of the location ---- New in PercentTax Class ---- :percent: percent to",
"the player the option to purchase the tile, if the tile is purchased,",
"of the Tile :param price: (int) purchase cost of the tile \"\"\" self.location",
"Class for all locations on the board Attributes: :location: (int) position, (0 -",
"Tile the active player will be moved to \"\"\" # Checks if player",
"$\" + str(self.price) + \"? y/n\") return buy_or_pass def mortgage(self): \"\"\" Sets is_mortgaged",
"def read_in_board(cls): \"\"\" read in a board from file. Each line in the",
"def set_cost_per_house(location): \"\"\" Determines the price for one house based on the location",
"True while loop_value: try: if get_yes_or_no_input('Would You Like To Use The Standard Board?'):",
"def ask_buy_or_pass(self): \"\"\" Asks the player if they would like to purchase the",
"TileFactory.create_tile(data) cls.spaces.update(new_tile) loop_value = False except FileNotFoundError: if file_name == \"Q\": quit() print(\"File",
"Bank.exchange_money(self.owner, self.price) self.owner = Bank def unmortgage(self, player): \"\"\" Sets is_mortgaged to False,",
"8: return self.go_back(3) elif key == 9: return self.go_to_jail() elif key == 10:",
"{} I don't know how to make you pay everyone else... yet\".format(self.active_player)) def",
"board :name: (String) Name of the location \"\"\" @abstractmethod def __init__(self, location, name):",
"Name of the Tile \"\"\" super().__init__(location, name) def draw_card(self): \"\"\" Chooses a random",
"class JustVisiting(CornerTile): \"\"\" Models Just Visiting (jail) tile Attributes: ---- From CornerTile Class",
"is the property \"\"\" spaces = {} @classmethod def default_board(cls): \"\"\" Builds a",
"data)} elif class_type == \"Utility\": return {position: Utility(position, name)} elif class_type == \"Railroad\":",
"the property :rent: (1x6 array-like) Rent tiers for the property :number_of_houses: (int) Number",
"output = \"{0} {1}\" \\ \"\\n\\t{2}\".format(self.location, self.name, self.format_owner()) return output class Property(Location): \"\"\"",
"of the tile :is_mortgaged: (Boolean) mortgage state of the tile \"\"\" @abstractmethod def",
"name, property_data): \"\"\" :param location: (Int) position on the board, int from 0",
"self.gain_money(100) elif key == 11: return self.lose_money(50) elif key == 12: return self.lose_money(150)",
"x in [2, 17, 33]} free_parking = {20: FreeParking()} cls.spaces.update(streets) cls.spaces.update(railroads) cls.spaces.update(utilities) cls.spaces.update(chances)",
"super().__init__(location, name, price) def owned_by_player(self, player): \"\"\" Charges player rent, transfers rent between",
"key == 4: return self.get_out_of_jail_free() elif key == 5: return self.go_to_jail() elif key",
"Card Class ---- :active_player: (Player Object) Player that the card will be affecting",
"== 0: print(\"Lucky for you, you have no houses\") else: print(\"You paid: $\",",
"location='jail', name='jail'): super().__init__(location, name) def landed_on(self, player): pass def __str__(self): return \"This is",
"\"\"\" num_railroads_owned = 0 cost = {1: 50, 2: 100, 3: 150, 4:",
"price=150): \"\"\" :param location: (int) Location, (0 - 39) on the monopoly board",
"Attributes: :location: (int) position, (0 - 39), on the monopoly board :name: (String)",
"num_railroads_owned = 0 cost = {1: 50, 2: 100, 3: 150, 4: 200}",
"+ 1 passed_go = False while not isinstance( Board.spaces[location_to_check], class_type): location_to_check += 1",
"while not isinstance( Board.spaces[location_to_check], class_type): location_to_check += 1 if location_to_check > 39: location_to_check",
"be affecting \"\"\" def __init__(self, location, name): \"\"\" :param location: (int) Location, (0",
"player pays all other players specified amount :param amount: (int) amount to pay",
"\"\"\" Parent Class for Chance and Community Chest Cards Attributes: ---- From Effect",
"player: (Player Object) Player that is unmortgageing the tile \"\"\" self.is_mortgaged = False",
"200 self.active_player.position = tile_num print(\"You've been moved to :\", Board.spaces[tile_num], \"\\n\\tTile Number:\", tile_num)",
":return: (String) Description of the tile \"\"\" output = \"{0} {1}\".format(self.location, self.name) return",
"the tile \"\"\" if self.owner == Bank: self.owned_by_bank(player) elif self.owner != player: self.owned_by_player(player)",
"0 cost = {1: 50, 2: 100, 3: 150, 4: 200} for key",
"From Card Class ---- :location: (int) position, (0 - 39), on the monopoly",
"the player would like to buy False if the player would not like",
"__init__(self, location, name, price): \"\"\" :param location: (int) Location, (0 - 39) on",
"(int) cost for one house \"\"\" if location > 30: return 200 elif",
"(Boolean) mortgage state of the tile ---- New in Property Class ---- :color:",
"self.price = self.price * 2 def owned_by_player(self, player): \"\"\" Charges player rent, transfers",
"self.advance_to_tile(0) elif key == 1: return self.advance_to_tile(24) elif key == 2: return self.advance_to_tile(11)",
"board Does not include railroads or utilities Attributes: ---- From Location Class ----",
"active player \"\"\" print(\"You've lost $\", amount) Board.spaces[20].exchange_money(self.active_player, amount) def get_out_of_jail_free(self): \"\"\" Gives",
"that tiles landed_on method. :param num_tiles: (int) Number of tiles to be moved",
"rent between owner and player :param player: (Player Object) Player that landed on",
"key == 2: return self.advance_to_tile(11) elif key == 3: return self.advance_to_next(Utility) elif key",
"output class Property(Location): \"\"\" Defines all the Properties on the board Does not",
"elif key == 2: return self.lose_money(50) elif key == 3: return self.gain_money(50) elif",
"Defines all 4 railroads Attributes: ---- From Location Class ---- :location: (int) position,",
"\"? y/n\") return buy_or_pass def mortgage(self): \"\"\" Sets is_mortgaged to True, Gives owner",
":cost_per_house: (int) Price of one house \"\"\" def __init__(self, location, name, property_data): \"\"\"",
"Cards Attributes: ---- From Effect Class ---- :location: (int) position, (0 - 39),",
"x in [5, 15, 25, 35]} utilities = {x: Utility(x, \"Name\") for x",
"player \"\"\" print(\"You've gained $\", amount) self.active_player.money += amount def lose_money(self, amount): \"\"\"",
"elif key == 14: return self.house_repairs() elif key == 15: return self.gain_money(10) elif",
"effects -------------- def advance_to_tile(self, tile_num): \"\"\" Moves player to specified tile and calls",
"in SetTax Class ---- :amount: Amount to tax the player \"\"\" def __init__(self,",
"player the option to purchase the tile, if the tile is purchased, transfers",
"card and calls the appropriate method \"\"\" key = randint(0, 16) if key",
"(Optional, int, default=150) purchase cost of the tile \"\"\" super().__init__(location, name, price) def",
"return self.lose_money(15) elif key == 12: return self.advance_to_tile(5) elif key == 13: return",
"15: return self.gain_money(150) elif key == 16: return self.gain_money(100) else: return print(\"Bad Chance",
"FreeParking(position)} elif class_type == \"Go\": return {0: Go()} elif class_type == 'JustVisiting': return",
"Amount to tax the player \"\"\" def __init__(self, location, name, amount): \"\"\" :param",
"the player \"\"\" self.amount = int(amount) super().__init__(location, name) def landed_on(self, player): \"\"\" Takes",
"price :param player: (Player Object) Player that is unmortgageing the tile \"\"\" self.is_mortgaged",
"self.advance_to_tile(11) elif key == 3: return self.advance_to_next(Utility) elif key == 4: return self.advance_to_next(Railroad)",
"self.percent) def __str__(self): \"\"\" :return: (String) Easy to read tile description \"\"\" output",
"elif class_type == 'GoToJail': return {30: GoToJail()} elif class_type == 'jail': return {'jail':",
"name)} elif class_type == \"CommunityChest\": return {position: CommunityChest(position, name)} elif class_type == \"SetTax\":",
"> 20: return 150 elif location > 10: return 100 else: return 50",
"no houses\") else: print(\"You paid: $\", owed_money) Board.spaces[20].exchange_money(self.active_player, owed_money) def pay_all_other_players(self, amount): \"\"\"",
"player to the next tile of specified class type :param class_type: (Object) class",
"JustVisiting()} elif class_type == 'GoToJail': return {30: GoToJail()} elif class_type == 'jail': return",
"the monopoly board :name: (String) Name of the location ---- New in PercentTax",
"self.ask_buy_or_pass() if buy_or_pass: # buy player.money = player.money - self.price self.owner = player",
"in range(0, 40)} railroads = {x: Railroad(x, \"Name\") for x in [5, 15,",
"of the tile :is_mortgaged: (Boolean) mortgage state of the tile \"\"\" def __init__(self,",
"5 Zero is No houses Five is a hotel :cost_per_house: (int) Price of",
"Five is a hotel :cost_per_house: (int) Price of one house \"\"\" def __init__(self,",
"the board \"\"\" while True: try: if data is not None: position =",
"output class Chance(Card): \"\"\" All Chance Cards Attributes: ---- From Card Class ----",
"repairs \"\"\" owed_money = 0 for key in Board.spaces: try: if Board.spaces[key].owner ==",
"monopoly board :name: (String) Name of the location \"\"\" def __init__(self, location=10, name=\"JustVisiting\"):",
"\"\"\" All Community Chest Cards Attributes: ---- From Card Class ---- :location: (int)",
"have no attribute owner, skipped pass print(\"House repairs are expensive!\") if owed_money ==",
"pay other players \"\"\" # TODO: implement pay all other players print(\"Lucky for",
"print(\"House repairs are expensive!\") if owed_money == 0: print(\"Lucky for you, you have",
"Not found, please try again.\\n\\tOr Enter Q to quit\\n\") @classmethod def __str__(cls): \"\"\"",
"on the monopoly board :param name: (String) Name of the Tile \"\"\" super().__init__(location,",
"key == 6: return self.gain_money(50) elif key == 7: self.get_out_of_jail_free() elif key ==",
"not dependant on the player's wealth Attributes: ---- From Effect Class ---- :location:",
"I don't know how to make you pay everyone else... yet\".format(self.active_player)) def get_money_from_all_other_players(self,",
"House: {4}, Number Of Houses: {5}\" \\ \"\\n\\tRent Tiers {6}\"\\ .format(self.location, self.name, self.color,",
"on tile \"\"\" num_railroads_owned = 0 cost = {1: 50, 2: 100, 3:",
"Bank self.is_mortgaged = False super().__init__() def landed_on(self, player): \"\"\" Calls the proper function",
"player house repairs \"\"\" owed_money = 0 for key in Board.spaces: try: if",
"tile \"\"\" buy_or_pass = self.ask_buy_or_pass() if buy_or_pass: # buy player.money = player.money -",
"class Location(ABC): \"\"\" Abstract Parent Class for all locations on the board Attributes:",
"str(tier) + ', ' owned_by = self.format_owner() output = \"{0} {1} {2}\" \\",
"elif key == 1: return self.gain_money(200) elif key == 2: return self.lose_money(50) elif",
"to 1/2 price, Sets owner to Bank, \"\"\" self.is_mortgaged = True self.price =",
"Easy to read tile description \"\"\" rent_tiers = '' for tier in self.rent:",
"of the property :rent: (1x6 array-like) Rent tiers for the property :number_of_houses: (int)",
"Player that is unmortgageing the tile \"\"\" self.is_mortgaged = False self.price = self.price",
"calls that tile's landed_on method :param tile_num: (int) Tile the active player will",
"Current Rent \"\"\" return str(self.rent[self.number_of_houses]) class Utility(Location): \"\"\" Defines all utilities i.e. Electric",
"2: return self.lose_money(50) elif key == 3: return self.gain_money(50) elif key == 4:",
"Name of the Tile :param price: (Optional, int, default=200) purchase cost of the",
"active player to the next tile of specified class type :param class_type: (Object)",
"Utility, Card \"\"\" location_to_check = self.active_player.position + 1 passed_go = False while not",
"self.lose_money(150) elif key == 13: return self.gain_money(25) elif key == 14: return self.house_repairs()",
"Card\") for x in [7, 22, 36]} community_chest = {x: CommunityChest(x, \"Community Chest",
"in from a file :return: A tile to be added to the board",
"to make you pay everyone else... yet\".format(self.active_player)) def get_money_from_all_other_players(self, amount): \"\"\" Active player",
":param player: (Player Object) Player that landed on tile \"\"\" self.owner.exchange_money(player, self.rent[self.number_of_houses]) def",
"def __init__(self, location, name, amount): \"\"\" :param location: (int) Location, (0 - 39)",
"Bank: self.owned_by_bank(player) elif self.owner != player: self.owned_by_player(player) def owned_by_bank(self, player): \"\"\" Gives the",
"{1}\".format(self.location, self.name) return output class Chance(Card): \"\"\" All Chance Cards Attributes: ---- From",
"sets is_mortgaged to False :param player: (Player Object) Player that landed on the",
"self.percent = float(percent) super().__init__(location, name) def landed_on(self, player): \"\"\" Charges player percent of",
"\"\"\" super().__init__(location, name) def draw_card(self): \"\"\" Chooses a random random card and calls",
"player \"\"\" self.percent = float(percent) super().__init__(location, name) def landed_on(self, player): \"\"\" Charges player",
"0 for key in Board.spaces: try: if Board.spaces[key].owner == self.active_player: hold = Board.spaces[key].number_of_houses",
"on the monopoly board :param name: (String) Name of the Tile \"\"\" self.location",
"== 4: return self.get_out_of_jail_free() elif key == 5: return self.go_to_jail() elif key ==",
"gain_money(self, amount): \"\"\" Give player money :param amount: (int) Amount of money to",
"elif self.owner != player: self.owned_by_player(player) def owned_by_bank(self, player): \"\"\" Gives the player the",
"of the tile \"\"\" @abstractmethod def __init__(self, location, name, price): \"\"\" :param location:",
":param name: (String) Name of the Tile :param price: (Optional, int, default=150) purchase",
"39), on the monopoly board :name: (String) Name of the location :active_player: (Player",
"Tile \"\"\" super().__init__(location, name) def draw_card(self): \"\"\" Chooses a random random card and",
"elif key == 14: return self.pay_all_other_players(50) elif key == 15: return self.gain_money(150) elif",
"file should be formatted as follows: Square# ClassType class data \"\"\" loop_value =",
"where an effect is applied. Including Chance, Community Chest, Income tax, etc. Attributes:",
"== 6: return self.gain_money(50) elif key == 7: self.get_out_of_jail_free() elif key == 8:",
"Formatted __str__ method for all objects in spaces \"\"\" output = '' for",
"price :return: (Boolean) True if the player would like to buy False if",
":param name: (String) Name of the Tile :param price: (int) purchase cost of",
"- 39) on the monopoly board :param name: (String) Name of the Tile",
"file_name = input(\"Please enter the file Name: \") with open('boards/' + file_name) as",
"{10: JustVisiting()} elif class_type == 'GoToJail': return {30: GoToJail()} elif class_type == 'jail':",
"landed_on method :param tile_num: (int) Tile the active player will be moved to",
"self.location = location self.name = name @abstractmethod def landed_on(self, player): pass def __str__(self):",
"name='jail'): super().__init__(location, name) def landed_on(self, player): pass def __str__(self): return \"This is the",
"160]) for x in range(0, 40)} railroads = {x: Railroad(x, \"Name\") for x",
"return 150 elif location > 10: return 100 else: return 50 def __str__(self):",
"num_tiles print(\"You've been sent back \", num_tiles, \"tiles.\", \"\\nYou're now on tile number:",
"the appropriate method \"\"\" key = randint(0, 16) if key == 0: return",
"import randint from Exceptions import TilesClassNotFoundError class Location(ABC): \"\"\" Abstract Parent Class for",
"will be moved to \"\"\" # Checks if player will pass go if",
"class GoToJail(CornerTile): \"\"\" Class that sends people to jail \"\"\" def __init__(self, location=30,",
"tile_num: self.active_player.money += 200 self.active_player.position = tile_num print(\"You've been moved to :\", Board.spaces[tile_num],",
"other players print(\"Lucky for {} I don't know how to make you pay",
"[data[3], ] + last_part_data if class_type == \"Property\": return {position: Property(position, name, data)}",
"Player that landed on tile \"\"\" num_railroads_owned = 0 cost = {1: 50,",
"to read tile description \"\"\" output = \"{0} {1}\" \\ \"\\n\\tTax percent: {2}%\"\\",
":param name: (Optional, String, default=JustVisiting) Name of the Tile \"\"\" super().__init__(location, name) def",
"\"Owner: {0}, Current Rent {1}\" \\ .format(self.owner, self.format_current_rent()) else: owned_by = \"Owner: {0},",
"Location Class ---- :location: (int) position, (0 - 39), on the monopoly board",
"file Name: \") with open('boards/' + file_name) as file: for line in file:",
"4: return self.advance_to_next(Railroad) elif key == 5: return self.advance_to_next(Railroad) elif key == 6:",
"= {} @classmethod def default_board(cls): \"\"\" Builds a default board for testing \"\"\"",
"False except FileNotFoundError: if file_name == \"Q\": quit() print(\"File Not found, please try",
"Per House: {4}, Number Of Houses: {5}\" \\ \"\\n\\tRent Tiers {6}\"\\ .format(self.location, self.name,",
"amount: (int) amount to pay other players \"\"\" # TODO: implement pay all",
"Chance and Community Chest Cards Attributes: ---- From Effect Class ---- :location: (int)",
"data \"\"\" loop_value = True while loop_value: try: if get_yes_or_no_input('Would You Like To",
"\"\"\" self.active_player = None super().__init__(location, name) def landed_on(self, player): \"\"\" Sets Active player",
"Active player gets money from all other players :param amount: (int) amount gotten",
"= 0 for key in Board.spaces: try: if Board.spaces[key].owner == self.active_player: hold =",
"loop_value = True while loop_value: try: if get_yes_or_no_input('Would You Like To Use The",
"---- :location: (int) position, (0 - 39), on the monopoly board :name: (String)",
"property, 0 - 5 Zero is No houses Five is a hotel :cost_per_house:",
"elif key == 13: return self.advance_to_tile(39) elif key == 14: return self.pay_all_other_players(50) elif",
"for testing \"\"\" cls.spaces = {} streets = {x: Property(x, \"Name\", [\"Color\", 150,",
"for key in cls.spaces: output = output + \"\\n\" + cls.spaces[key].__str__() return output",
"amount: (int) amount to tax the player \"\"\" self.amount = int(amount) super().__init__(location, name)",
":spaces: (Dict) A dictionary where the key is the location of a tile",
"try: data = [int(x) for x in data[3:]] except ValueError: last_part_data = [int(x)",
"(Optional, String, default=GO) Name of the Tile \"\"\" super().__init__(location, name) def landed_on(self, player):",
"Property(Location): \"\"\" Defines all the Properties on the board Does not include railroads",
"on the board :return: (int) cost for one house \"\"\" if location >",
":is_mortgaged: (Boolean) mortgage state of the tile \"\"\" def __init__(self, location, name, price=200):",
"Current Rent {1}\" \\ .format(self.owner, self.format_current_rent()) else: owned_by = \"Owner: {0}, Price: {1},",
"name, data[0])} elif class_type == \"FreeParking\": return {position: FreeParking(position)} elif class_type == \"Go\":",
"self.advance_to_next(Railroad) elif key == 6: return self.gain_money(50) elif key == 7: self.get_out_of_jail_free() elif",
"{1: 50, 2: 100, 3: 150, 4: 200} for key in self.owner.owned_properites: if",
"player): \"\"\" Charges player percent of their total wealth and gives it to",
"self.owner = Bank def unmortgage(self, player): \"\"\" Sets is_mortgaged to False, Sets price",
"is No houses Five is a hotel :cost_per_house: (int) Price of one house",
"self.name, self.color, owned_by, self.cost_per_house, self.number_of_houses, rent_tiers) return output def format_current_rent(self): \"\"\" Formats Current",
"to give active player \"\"\" print(\"You've gained $\", amount) self.active_player.money += amount def",
"\"\"\" Charges player a set tax amount, is not dependant on the player's",
"price self.owner = Bank self.is_mortgaged = False super().__init__() def landed_on(self, player): \"\"\" Calls",
"\"Chance Card\") for x in [7, 22, 36]} community_chest = {x: CommunityChest(x, \"Community",
"\"\"\" def __init__(self, location, name, percent): \"\"\" :param location: (int) Location, (0 -",
"{position: Railroad(position, name)} elif class_type == \"Chance\": return {position: Chance(position, name)} elif class_type",
"\"\"\" Models GO Tile Attributes: ---- From CornerTile Class ---- :location: (int) position,",
"tile :is_mortgaged: (Boolean) mortgage state of the tile \"\"\" def __init__(self, location, name,",
"property_data: (1x9 array-like) list with various data formatted as follows [\"Color\", Price, rent,",
"class Railroad(Location): \"\"\" Defines all 4 railroads Attributes: ---- From Location Class ----",
"self.owned_by_bank(player) elif self.owner != player: self.owned_by_player(player) def owned_by_bank(self, player): \"\"\" Gives the player",
":name: (String) Name of the location ---- New In Card Class ---- :active_player:",
"6: return self.get_money_from_all_other_players(50) elif key == 7: return self.gain_money(100) elif key == 8:",
"Number of houses on the property, 0 - 5 Zero is No houses",
"player to jail, player does not pass go and does not collect $200",
"amount): \"\"\" Give player money :param amount: (int) Amount of money to give",
"Location, (0 - 39) on the monopoly board :param name: (Optional, String, default=GO)",
"to jail \"\"\" def __init__(self, location=30, name='Go To Jail'): super().__init__(location, name) def landed_on(self,",
"self.color, owned_by, self.cost_per_house, self.number_of_houses, rent_tiers) return output def format_current_rent(self): \"\"\" Formats Current rent",
"tile :owner: (UserEntity Object) Current Owner of the tile :is_mortgaged: (Boolean) mortgage state",
"rent_1_house, ..., rent_hotel] \"\"\" self.color = property_data[0] self.rent = property_data[2:] self.number_of_houses = 0",
"class_type): \"\"\" Advances active player to the next tile of specified class type",
"def landed_on(self, player): \"\"\" Takes amount from player and adds it to Free",
"to tax the player \"\"\" def __init__(self, location, name, percent): \"\"\" :param location:",
"board, int from 0 to 39 :param property_data: (1x9 array-like) list with various",
"position on the board, int from 0 to 39 :param property_data: (1x9 array-like)",
"amount: (int) amount of money to take from active player \"\"\" print(\"You've lost",
"for x in [7, 22, 36]} community_chest = {x: CommunityChest(x, \"Community Chest Card\")",
"Owner of the tile :is_mortgaged: (Boolean) mortgage state of the tile \"\"\" @abstractmethod",
"array-like) list with various data formatted as follows [\"Color\", Price, rent, rent_1_house, ...,",
"Board.spaces[self.active_player.position].landed_on(self.active_player) def advance_to_next(self, class_type): \"\"\" Advances active player to the next tile of",
"tax the player \"\"\" self.amount = int(amount) super().__init__(location, name) def landed_on(self, player): \"\"\"",
"for x in data[4:]] data = [data[3], ] + last_part_data if class_type ==",
"3: return self.advance_to_next(Utility) elif key == 4: return self.advance_to_next(Railroad) elif key == 5:",
"owned_by_player(self, player): \"\"\" Charges player rent, transfers rent between owner and player :param",
"name='Go To Jail'): super().__init__(location, name) def landed_on(self, player): player.position = 'jail' print(\"Go To",
"tile_num): \"\"\" Moves player to specified tile and calls that tile's landed_on method",
"Use The Standard Board?'): file_name = 'StandardBoard' else: file_name = input(\"Please enter the",
"self.price self.owner = player player.owned_properites.update({self.location: self}) self.is_mortgaged = False self.price = self.price *",
"if they would like to purchase the property, displays the Name and price",
"buy \"\"\" buy_or_pass = get_yes_or_no_input( \"Would you like to buy \" + self.name",
"tile description \"\"\" output = \"{0} {1}\".format(self.location, self.name) return output class Chance(Card): \"\"\"",
"amount = amount * -1 self.pay_all_other_players(amount) def __str__(self): \"\"\" :return: (String) Easy to",
"return self.gain_money(50) elif key == 4: return self.get_out_of_jail_free() elif key == 5: return",
"\"\"\" Sets is_mortgaged to True, Gives owner mortgage value (1/2 price), Sets price",
"player and adds it to Free Parking :param player: (Player Object) Player that",
"return self.gain_money(100) elif key == 8: return self.gain_money(20) elif key == 9: return",
"output + \"\\n\" + cls.spaces[key].__str__() return output # construct the default board for",
"Formats Current rent for __str__ :return: (String) Current Rent \"\"\" return str(self.rent[self.number_of_houses]) class",
"self.owner.exchange_money(player, self.rent[self.number_of_houses]) def ask_buy_or_pass(self): \"\"\" Asks the player if they would like to",
"Class For Each of the corner tiles Excluding Free Parking. Attributes: :location: (int)",
"CC draw\") class Board(object): \"\"\" The Monopoly Board Attributes: :spaces: (Dict) A dictionary",
"def __init__(self, location, name, price=150): \"\"\" :param location: (int) Location, (0 - 39)",
"---- From Location Class ---- :location: (int) position, (0 - 39), on the",
"adds it to Free Parking :param player: (Player Object) Player that landed on",
":name: (String) Name of the location :price: (int) purchase cost of the tile",
"location_to_check = location_to_check % 40 passed_go = True self.active_player.position = location_to_check if passed_go:",
"the location :param location: (int) location on the board :return: (int) cost for",
"Number: \", self.active_player.position) return Board.spaces[self.active_player.position].landed_on(self.active_player) def gain_money(self, amount): \"\"\" Give player money :param",
"multiplier = {1: 4, 2: 10} roll = randint(1, 6) for key in",
"tile and the content is the property \"\"\" spaces = {} @classmethod def",
"39 :param property_data: (1x9 array-like) list with various data formatted as follows [\"Color\",",
"repairs are expensive!\") if owed_money == 0: print(\"Lucky for you, you have no",
"class for all squares where an effect is applied. Including Chance, Community Chest,",
"name self.price = price self.owner = Bank self.is_mortgaged = False super().__init__() def landed_on(self,",
"the player \"\"\" def __init__(self, location, name, percent): \"\"\" :param location: (int) Location,",
"player): \"\"\" Gives the player the option to purchase the tile, if the",
"(Int) position on the board, int from 0 to 39 :param property_data: (1x9",
"(jail) tile Attributes: ---- From CornerTile Class ---- :location: (int) position, (0 -",
"= False except FileNotFoundError: if file_name == \"Q\": quit() print(\"File Not found, please",
"int(amount) super().__init__(location, name) def landed_on(self, player): \"\"\" Takes amount from player and adds",
"output = \"{0} {1}\" \\ \"\\n\\tTax percent: {2}%\"\\ .format(self.location, self.name, self.percent) class CornerTile(ABC):",
"player to player, then calls draw_card() :param player: (Player Object) Player that landed"
] |
[
"critical_numbers.logic import api_requests, converter @click.group() def cli(): pass @cli.command('serve') def serve(): '''serves webapp",
"<gh_stars>1-10 import click import json from critical_numbers import app from critical_numbers.logic import api_requests,",
"@click.group() def cli(): pass @cli.command('serve') def serve(): '''serves webapp to 127.0.0.1:5000''' app.run() @cli.command('getall')",
"= api_requests.get_stats() hot_tm_projects = converter.convert_to_geojson(hot_tm_projects) with open('hot-tm-projects.geojson', 'w') as f: json.dump(hot_tm_projects, f) click.echo('GeoJSON",
"api_requests.get_stats() hot_tm_projects = converter.convert_to_geojson(hot_tm_projects) with open('hot-tm-projects.geojson', 'w') as f: json.dump(hot_tm_projects, f) click.echo('GeoJSON of",
"HOT Tasking Manager as GeoJSON''' hot_tm_projects = api_requests.get_stats() hot_tm_projects = converter.convert_to_geojson(hot_tm_projects) with open('hot-tm-projects.geojson',",
"click import json from critical_numbers import app from critical_numbers.logic import api_requests, converter @click.group()",
"click.echo('GeoJSON of all hot-tm projects succsesfully written to \"hot-tm-projects.geojson\"') if __name__ == '__main__':",
"def cli(): pass @cli.command('serve') def serve(): '''serves webapp to 127.0.0.1:5000''' app.run() @cli.command('getall') def",
"cli(): pass @cli.command('serve') def serve(): '''serves webapp to 127.0.0.1:5000''' app.run() @cli.command('getall') def getall():",
"from critical_numbers import app from critical_numbers.logic import api_requests, converter @click.group() def cli(): pass",
"hot_tm_projects = converter.convert_to_geojson(hot_tm_projects) with open('hot-tm-projects.geojson', 'w') as f: json.dump(hot_tm_projects, f) click.echo('GeoJSON of all",
"of all hot-tm projects succsesfully written to \"hot-tm-projects.geojson\"') if __name__ == '__main__': cli()",
"app.run() @cli.command('getall') def getall(): '''gets all projects from the HOT Tasking Manager as",
"def getall(): '''gets all projects from the HOT Tasking Manager as GeoJSON''' hot_tm_projects",
"as f: json.dump(hot_tm_projects, f) click.echo('GeoJSON of all hot-tm projects succsesfully written to \"hot-tm-projects.geojson\"')",
"GeoJSON''' hot_tm_projects = api_requests.get_stats() hot_tm_projects = converter.convert_to_geojson(hot_tm_projects) with open('hot-tm-projects.geojson', 'w') as f: json.dump(hot_tm_projects,",
"api_requests, converter @click.group() def cli(): pass @cli.command('serve') def serve(): '''serves webapp to 127.0.0.1:5000'''",
"app from critical_numbers.logic import api_requests, converter @click.group() def cli(): pass @cli.command('serve') def serve():",
"json from critical_numbers import app from critical_numbers.logic import api_requests, converter @click.group() def cli():",
"webapp to 127.0.0.1:5000''' app.run() @cli.command('getall') def getall(): '''gets all projects from the HOT",
"def serve(): '''serves webapp to 127.0.0.1:5000''' app.run() @cli.command('getall') def getall(): '''gets all projects",
"from the HOT Tasking Manager as GeoJSON''' hot_tm_projects = api_requests.get_stats() hot_tm_projects = converter.convert_to_geojson(hot_tm_projects)",
"critical_numbers import app from critical_numbers.logic import api_requests, converter @click.group() def cli(): pass @cli.command('serve')",
"hot_tm_projects = api_requests.get_stats() hot_tm_projects = converter.convert_to_geojson(hot_tm_projects) with open('hot-tm-projects.geojson', 'w') as f: json.dump(hot_tm_projects, f)",
"'w') as f: json.dump(hot_tm_projects, f) click.echo('GeoJSON of all hot-tm projects succsesfully written to",
"@cli.command('serve') def serve(): '''serves webapp to 127.0.0.1:5000''' app.run() @cli.command('getall') def getall(): '''gets all",
"as GeoJSON''' hot_tm_projects = api_requests.get_stats() hot_tm_projects = converter.convert_to_geojson(hot_tm_projects) with open('hot-tm-projects.geojson', 'w') as f:",
"f) click.echo('GeoJSON of all hot-tm projects succsesfully written to \"hot-tm-projects.geojson\"') if __name__ ==",
"Manager as GeoJSON''' hot_tm_projects = api_requests.get_stats() hot_tm_projects = converter.convert_to_geojson(hot_tm_projects) with open('hot-tm-projects.geojson', 'w') as",
"json.dump(hot_tm_projects, f) click.echo('GeoJSON of all hot-tm projects succsesfully written to \"hot-tm-projects.geojson\"') if __name__",
"import api_requests, converter @click.group() def cli(): pass @cli.command('serve') def serve(): '''serves webapp to",
"pass @cli.command('serve') def serve(): '''serves webapp to 127.0.0.1:5000''' app.run() @cli.command('getall') def getall(): '''gets",
"Tasking Manager as GeoJSON''' hot_tm_projects = api_requests.get_stats() hot_tm_projects = converter.convert_to_geojson(hot_tm_projects) with open('hot-tm-projects.geojson', 'w')",
"converter.convert_to_geojson(hot_tm_projects) with open('hot-tm-projects.geojson', 'w') as f: json.dump(hot_tm_projects, f) click.echo('GeoJSON of all hot-tm projects",
"@cli.command('getall') def getall(): '''gets all projects from the HOT Tasking Manager as GeoJSON'''",
"with open('hot-tm-projects.geojson', 'w') as f: json.dump(hot_tm_projects, f) click.echo('GeoJSON of all hot-tm projects succsesfully",
"getall(): '''gets all projects from the HOT Tasking Manager as GeoJSON''' hot_tm_projects =",
"serve(): '''serves webapp to 127.0.0.1:5000''' app.run() @cli.command('getall') def getall(): '''gets all projects from",
"from critical_numbers.logic import api_requests, converter @click.group() def cli(): pass @cli.command('serve') def serve(): '''serves",
"projects from the HOT Tasking Manager as GeoJSON''' hot_tm_projects = api_requests.get_stats() hot_tm_projects =",
"f: json.dump(hot_tm_projects, f) click.echo('GeoJSON of all hot-tm projects succsesfully written to \"hot-tm-projects.geojson\"') if",
"import click import json from critical_numbers import app from critical_numbers.logic import api_requests, converter",
"'''gets all projects from the HOT Tasking Manager as GeoJSON''' hot_tm_projects = api_requests.get_stats()",
"to 127.0.0.1:5000''' app.run() @cli.command('getall') def getall(): '''gets all projects from the HOT Tasking",
"127.0.0.1:5000''' app.run() @cli.command('getall') def getall(): '''gets all projects from the HOT Tasking Manager",
"import app from critical_numbers.logic import api_requests, converter @click.group() def cli(): pass @cli.command('serve') def",
"converter @click.group() def cli(): pass @cli.command('serve') def serve(): '''serves webapp to 127.0.0.1:5000''' app.run()",
"the HOT Tasking Manager as GeoJSON''' hot_tm_projects = api_requests.get_stats() hot_tm_projects = converter.convert_to_geojson(hot_tm_projects) with",
"'''serves webapp to 127.0.0.1:5000''' app.run() @cli.command('getall') def getall(): '''gets all projects from the",
"all projects from the HOT Tasking Manager as GeoJSON''' hot_tm_projects = api_requests.get_stats() hot_tm_projects",
"= converter.convert_to_geojson(hot_tm_projects) with open('hot-tm-projects.geojson', 'w') as f: json.dump(hot_tm_projects, f) click.echo('GeoJSON of all hot-tm",
"import json from critical_numbers import app from critical_numbers.logic import api_requests, converter @click.group() def",
"open('hot-tm-projects.geojson', 'w') as f: json.dump(hot_tm_projects, f) click.echo('GeoJSON of all hot-tm projects succsesfully written"
] |