[ "Maximize) in str(e.value) obj = LpObjective(name='', expression=None, constant=0) with pytest.raises(ValueError) as e: obj.sense", "with pytest.raises(ValueError) as e: LpObjective(name='', expression=None, constant=0, sense='') assert \"Sense must be one", "= LpObjective(name='', expression=None, constant=0) assert obj assert obj.sense == Minimize obj.sense = Maximize", "expression=None, constant=0, sense='') assert \"Sense must be one of %s, %s not \"", "pytest from flipy.lp_objective import LpObjective, Minimize, Maximize @pytest.fixture def objective(name='', expression=None, constant=0): return", "expression=None, constant=0) assert obj assert obj.sense == Minimize obj.sense = Maximize assert obj.sense", "obj = LpObjective(name='', expression=None, constant=0) with pytest.raises(ValueError) as e: obj.sense = 'maximize' assert", "TestLpExpression(object): def test_init(self): obj = LpObjective(name='', expression=None, constant=0) assert obj assert obj.sense ==", "e: LpObjective(name='', expression=None, constant=0, sense='') assert \"Sense must be one of %s, %s", "be one of %s, %s not \" % (Minimize, Maximize) in str(e.value) obj", "constant=0) assert obj assert obj.sense == Minimize obj.sense = Maximize assert obj.sense ==", "obj.sense = Maximize assert obj.sense == Maximize def test_bad_sense(self): with pytest.raises(ValueError) as e:", "constant=0): return LpObjective(name, expression, constant) class TestLpExpression(object): def test_init(self): obj = LpObjective(name='', expression=None,", "= Maximize assert obj.sense == Maximize def test_bad_sense(self): with pytest.raises(ValueError) as e: LpObjective(name='',", "== Minimize obj.sense = Maximize assert obj.sense == Maximize def test_bad_sense(self): with pytest.raises(ValueError)", "from flipy.lp_objective import LpObjective, Minimize, Maximize @pytest.fixture def objective(name='', expression=None, constant=0): return LpObjective(name,", "%s, %s not \" % (Minimize, Maximize) in str(e.value) obj = LpObjective(name='', expression=None,", "import pytest from flipy.lp_objective import LpObjective, Minimize, Maximize @pytest.fixture def objective(name='', expression=None, constant=0):", "assert obj.sense == Maximize def test_bad_sense(self): with pytest.raises(ValueError) as e: LpObjective(name='', expression=None, constant=0,", "(Minimize, Maximize) in str(e.value) obj = LpObjective(name='', expression=None, constant=0) with pytest.raises(ValueError) as e:", "Maximize @pytest.fixture def objective(name='', expression=None, constant=0): return LpObjective(name, expression, constant) class TestLpExpression(object): def", "objective(name='', expression=None, constant=0): return LpObjective(name, expression, constant) class TestLpExpression(object): def test_init(self): obj =", "assert obj assert obj.sense == Minimize obj.sense = Maximize assert obj.sense == Maximize", "as e: LpObjective(name='', expression=None, constant=0, sense='') assert \"Sense must be one of %s,", "Minimize, Maximize @pytest.fixture def objective(name='', expression=None, constant=0): return LpObjective(name, expression, constant) class TestLpExpression(object):", "'maximize' assert \"Sense must be one of %s, %s not \" % (Minimize,", "LpObjective(name, expression, constant) class TestLpExpression(object): def test_init(self): obj = LpObjective(name='', expression=None, constant=0) assert", "pytest.raises(ValueError) as e: LpObjective(name='', expression=None, constant=0, sense='') assert \"Sense must be one of", "obj.sense = 'maximize' assert \"Sense must 
be one of %s, %s not \"", "% (Minimize, Maximize) in str(e.value) obj = LpObjective(name='', expression=None, constant=0) with pytest.raises(ValueError) as", "%s not \" % (Minimize, Maximize) in str(e.value) obj = LpObjective(name='', expression=None, constant=0)", "= LpObjective(name='', expression=None, constant=0) with pytest.raises(ValueError) as e: obj.sense = 'maximize' assert \"Sense", "test_bad_sense(self): with pytest.raises(ValueError) as e: LpObjective(name='', expression=None, constant=0, sense='') assert \"Sense must be", "obj assert obj.sense == Minimize obj.sense = Maximize assert obj.sense == Maximize def", "constant=0) with pytest.raises(ValueError) as e: obj.sense = 'maximize' assert \"Sense must be one", "constant) class TestLpExpression(object): def test_init(self): obj = LpObjective(name='', expression=None, constant=0) assert obj assert", "pytest.raises(ValueError) as e: obj.sense = 'maximize' assert \"Sense must be one of %s,", "= 'maximize' assert \"Sense must be one of %s, %s not \" %", "obj = LpObjective(name='', expression=None, constant=0) assert obj assert obj.sense == Minimize obj.sense =", "def test_bad_sense(self): with pytest.raises(ValueError) as e: LpObjective(name='', expression=None, constant=0, sense='') assert \"Sense must", "LpObjective(name='', expression=None, constant=0) assert obj assert obj.sense == Minimize obj.sense = Maximize assert", "import LpObjective, Minimize, Maximize @pytest.fixture def objective(name='', expression=None, constant=0): return LpObjective(name, expression, constant)", "LpObjective(name='', expression=None, constant=0, sense='') assert \"Sense must be one of %s, %s not", "Minimize obj.sense = Maximize assert obj.sense == Maximize def test_bad_sense(self): with pytest.raises(ValueError) as", "def objective(name='', expression=None, constant=0): return LpObjective(name, expression, constant) class TestLpExpression(object): def test_init(self): obj", "expression=None, constant=0) with pytest.raises(ValueError) as e: obj.sense = 'maximize' assert \"Sense must be", "with pytest.raises(ValueError) as e: obj.sense = 'maximize' assert \"Sense must be one of", "assert \"Sense must be one of %s, %s not \" % (Minimize, Maximize)", "constant=0, sense='') assert \"Sense must be one of %s, %s not \" %", "as e: obj.sense = 'maximize' assert \"Sense must be one of %s, %s", "of %s, %s not \" % (Minimize, Maximize) in str(e.value) obj = LpObjective(name='',", "must be one of %s, %s not \" % (Minimize, Maximize) in str(e.value)", "not \" % (Minimize, Maximize) in str(e.value) obj = LpObjective(name='', expression=None, constant=0) with", "test_init(self): obj = LpObjective(name='', expression=None, constant=0) assert obj assert obj.sense == Minimize obj.sense", "assert obj.sense == Minimize obj.sense = Maximize assert obj.sense == Maximize def test_bad_sense(self):", "LpObjective, Minimize, Maximize @pytest.fixture def objective(name='', expression=None, constant=0): return LpObjective(name, expression, constant) class", "one of %s, %s not \" % (Minimize, Maximize) in str(e.value) obj =", "return LpObjective(name, expression, constant) class TestLpExpression(object): def test_init(self): obj = LpObjective(name='', expression=None, constant=0)", "@pytest.fixture def objective(name='', expression=None, constant=0): return LpObjective(name, expression, constant) class TestLpExpression(object): def test_init(self):", "<reponame>LovisAnderson/flipy import pytest from flipy.lp_objective import LpObjective, Minimize, Maximize @pytest.fixture def 
objective(name='', expression=None,", "expression, constant) class TestLpExpression(object): def test_init(self): obj = LpObjective(name='', expression=None, constant=0) assert obj", "\" % (Minimize, Maximize) in str(e.value) obj = LpObjective(name='', expression=None, constant=0) with pytest.raises(ValueError)", "flipy.lp_objective import LpObjective, Minimize, Maximize @pytest.fixture def objective(name='', expression=None, constant=0): return LpObjective(name, expression,", "e: obj.sense = 'maximize' assert \"Sense must be one of %s, %s not", "in str(e.value) obj = LpObjective(name='', expression=None, constant=0) with pytest.raises(ValueError) as e: obj.sense =", "Maximize def test_bad_sense(self): with pytest.raises(ValueError) as e: LpObjective(name='', expression=None, constant=0, sense='') assert \"Sense", "obj.sense == Minimize obj.sense = Maximize assert obj.sense == Maximize def test_bad_sense(self): with", "class TestLpExpression(object): def test_init(self): obj = LpObjective(name='', expression=None, constant=0) assert obj assert obj.sense", "obj.sense == Maximize def test_bad_sense(self): with pytest.raises(ValueError) as e: LpObjective(name='', expression=None, constant=0, sense='')", "sense='') assert \"Sense must be one of %s, %s not \" % (Minimize,", "Maximize assert obj.sense == Maximize def test_bad_sense(self): with pytest.raises(ValueError) as e: LpObjective(name='', expression=None,", "def test_init(self): obj = LpObjective(name='', expression=None, constant=0) assert obj assert obj.sense == Minimize", "expression=None, constant=0): return LpObjective(name, expression, constant) class TestLpExpression(object): def test_init(self): obj = LpObjective(name='',", "LpObjective(name='', expression=None, constant=0) with pytest.raises(ValueError) as e: obj.sense = 'maximize' assert \"Sense must", "\"Sense must be one of %s, %s not \" % (Minimize, Maximize) in", "== Maximize def test_bad_sense(self): with pytest.raises(ValueError) as e: LpObjective(name='', expression=None, constant=0, sense='') assert", "str(e.value) obj = LpObjective(name='', expression=None, constant=0) with pytest.raises(ValueError) as e: obj.sense = 'maximize'" ]
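These tests pin down LpObjective's sense validation without showing the class itself. As a rough illustration only — a minimal sketch assuming a property-based setter, with made-up constant values; flipy's actual implementation may differ — the asserted behaviour could look like:

# Hypothetical sketch, NOT flipy's source: only the error-message prefix and the
# Minimize/Maximize names come from the tests above; the constant values are invented.
Minimize, Maximize = 'min', 'max'


class LpObjectiveSketch:
    def __init__(self, name='', expression=None, constant=0, sense=Minimize):
        self.name, self.expression, self.constant = name, expression, constant
        self.sense = sense  # routed through the validating setter below

    @property
    def sense(self):
        return self._sense

    @sense.setter
    def sense(self, value):
        # Reject anything but the two sense constants, matching the tested message.
        if value not in (Minimize, Maximize):
            raise ValueError('Sense must be one of %s, %s not %s' % (Minimize, Maximize, value))
        self._sense = value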
[ "\"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" } } RESPONSE_JSON", "def test_module_command(requests_mock): \"\"\"Unit test for test-module command Args: requests_mock ([type]): [description] \"\"\" mock_reponse", "CONTEXT_JSON = { \"SecurityAdvisor.CoachUser\": { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\",", "\"message\": \"Coaching Sent\" } } RESPONSE_JSON = { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\":", "headers=HEADERS ) args = {\"user\": \"<EMAIL>\", \"context\": \"phishing\"} _, _, result = SecurityAdvisor.coach_end_user_command(client,", "test_module_command(requests_mock): \"\"\"Unit test for test-module command Args: requests_mock ([type]): [description] \"\"\" mock_reponse =", "requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) response", "result == RESPONSE_JSON def test_module_command(requests_mock): \"\"\"Unit test for test-module command Args: requests_mock ([type]):", "\"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" } } RESPONSE_JSON = {", "SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) response = SecurityAdvisor.test_module(client) assert response == \"ok\"", "\"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" } HEADERS = { 'Content-Type':", "test for coach-end-user command Args: requests_mock ([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL", "RESPONSE_JSON = { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\",", "verify=False, proxy=False, headers=HEADERS ) args = {\"user\": \"<EMAIL>\", \"context\": \"phishing\"} _, _, result", "\"Coaching Sent\" } } RESPONSE_JSON = { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\",", "\"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" } } RESPONSE_JSON =", "\"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" } HEADERS = { 'Content-Type': 'application/json',", "for coach-end-user command Args: requests_mock ([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL +", "'Authorization': 'Token ' + '<PASSWORD>' } def test_coach_end_user_command(requests_mock): \"\"\"Unit test for coach-end-user command", "URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) args = {\"user\":", "'application/json', 'Authorization': 'Token ' + '<PASSWORD>' } def test_coach_end_user_command(requests_mock): \"\"\"Unit test for coach-end-user", "\"message\": \"Coaching Sent\" } HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Token", "assert result == RESPONSE_JSON def test_module_command(requests_mock): \"\"\"Unit test for test-module command Args: requests_mock", "+ URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) response =", "\"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": 
\"phishing\", \"message\": \"Coaching Sent\" } HEADERS = {", "{ 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Token ' + '<PASSWORD>' } def test_coach_end_user_command(requests_mock):", "URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) response = SecurityAdvisor.test_module(client)", "import SecurityAdvisor URL_SUFFIX = 'apis/coachuser/' BASE_URL = 'https://www.securityadvisor.io/' CONTEXT_JSON = { \"SecurityAdvisor.CoachUser\": {", "\"phishing\", \"message\": \"Coaching Sent\" } } RESPONSE_JSON = { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\",", "} RESPONSE_JSON = { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\":", ") args = {\"user\": \"<EMAIL>\", \"context\": \"phishing\"} _, _, result = SecurityAdvisor.coach_end_user_command(client, args)", "([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client(", "{\"user\": \"<EMAIL>\", \"context\": \"phishing\"} _, _, result = SecurityAdvisor.coach_end_user_command(client, args) assert result ==", "= 'apis/coachuser/' BASE_URL = 'https://www.securityadvisor.io/' CONTEXT_JSON = { \"SecurityAdvisor.CoachUser\": { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\":", "mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False,", "proxy=False, headers=HEADERS ) args = {\"user\": \"<EMAIL>\", \"context\": \"phishing\"} _, _, result =", "SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) args = {\"user\": \"<EMAIL>\", \"context\": \"phishing\"} _,", "== RESPONSE_JSON def test_module_command(requests_mock): \"\"\"Unit test for test-module command Args: requests_mock ([type]): [description]", "\"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" } } RESPONSE_JSON = { \"coaching_date\": \"2019-10-04T21:04:19.480425\",", "'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Token ' + '<PASSWORD>' } def test_coach_end_user_command(requests_mock): \"\"\"Unit", "\"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" }", "command Args: requests_mock ([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_reponse)", "'apis/coachuser/' BASE_URL = 'https://www.securityadvisor.io/' CONTEXT_JSON = { \"SecurityAdvisor.CoachUser\": { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\",", "\"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" } HEADERS", "\"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" } HEADERS = { 'Content-Type': 'application/json', 'Accept':", "= RESPONSE_JSON requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS", "Sent\" } } RESPONSE_JSON = { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\":", "test-module 
command Args: requests_mock ([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL + URL_SUFFIX,", "} HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Token ' + '<PASSWORD>'", "'Token ' + '<PASSWORD>' } def test_coach_end_user_command(requests_mock): \"\"\"Unit test for coach-end-user command Args:", "RESPONSE_JSON requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS )", "= { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Token ' + '<PASSWORD>' } def", "+ URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) args =", "= {\"user\": \"<EMAIL>\", \"context\": \"phishing\"} _, _, result = SecurityAdvisor.coach_end_user_command(client, args) assert result", "\"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" } }", "'application/json', 'Accept': 'application/json', 'Authorization': 'Token ' + '<PASSWORD>' } def test_coach_end_user_command(requests_mock): \"\"\"Unit test", "json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) response = SecurityAdvisor.test_module(client) assert", "BASE_URL = 'https://www.securityadvisor.io/' CONTEXT_JSON = { \"SecurityAdvisor.CoachUser\": { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\":", "' + '<PASSWORD>' } def test_coach_end_user_command(requests_mock): \"\"\"Unit test for coach-end-user command Args: requests_mock", "requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) args", "client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) response = SecurityAdvisor.test_module(client) assert response", "\"Coaching Sent\" } HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Token '", "requests_mock ([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_reponse) client =", "client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) args = {\"user\": \"<EMAIL>\", \"context\":", "HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Token ' + '<PASSWORD>' }", "URL_SUFFIX = 'apis/coachuser/' BASE_URL = 'https://www.securityadvisor.io/' CONTEXT_JSON = { \"SecurityAdvisor.CoachUser\": { \"coaching_date\": \"2019-10-04T21:04:19.480425\",", "\"<EMAIL>\", \"context\": \"phishing\"} _, _, result = SecurityAdvisor.coach_end_user_command(client, args) assert result == RESPONSE_JSON", "\"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" } } RESPONSE_JSON = { \"coaching_date\":", "_, _, result = SecurityAdvisor.coach_end_user_command(client, args) assert result == RESPONSE_JSON def test_module_command(requests_mock): \"\"\"Unit", "\"SecurityAdvisor.CoachUser\": { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\":", 
"<reponame>diCagri/content<filename>Packs/SecurityAdvisor/Integrations/SecurityAdvisor/SecurityAdvisor_test.py import SecurityAdvisor URL_SUFFIX = 'apis/coachuser/' BASE_URL = 'https://www.securityadvisor.io/' CONTEXT_JSON = { \"SecurityAdvisor.CoachUser\":", "} def test_coach_end_user_command(requests_mock): \"\"\"Unit test for coach-end-user command Args: requests_mock ([type]): [description] \"\"\"", "= SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) args = {\"user\": \"<EMAIL>\", \"context\": \"phishing\"}", "= { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\":", "result = SecurityAdvisor.coach_end_user_command(client, args) assert result == RESPONSE_JSON def test_module_command(requests_mock): \"\"\"Unit test for", "= SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) response = SecurityAdvisor.test_module(client) assert response ==", "'<PASSWORD>' } def test_coach_end_user_command(requests_mock): \"\"\"Unit test for coach-end-user command Args: requests_mock ([type]): [description]", "'Accept': 'application/json', 'Authorization': 'Token ' + '<PASSWORD>' } def test_coach_end_user_command(requests_mock): \"\"\"Unit test for", "\"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False,", "{ \"SecurityAdvisor.CoachUser\": { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\",", "\"\"\"Unit test for test-module command Args: requests_mock ([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON", "\"context\": \"phishing\", \"message\": \"Coaching Sent\" } } RESPONSE_JSON = { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\":", "def test_coach_end_user_command(requests_mock): \"\"\"Unit test for coach-end-user command Args: requests_mock ([type]): [description] \"\"\" mock_reponse", "base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) args = {\"user\": \"<EMAIL>\", \"context\": \"phishing\"} _, _,", "_, result = SecurityAdvisor.coach_end_user_command(client, args) assert result == RESPONSE_JSON def test_module_command(requests_mock): \"\"\"Unit test", "'https://www.securityadvisor.io/' CONTEXT_JSON = { \"SecurityAdvisor.CoachUser\": { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\":", "\"context\": \"phishing\"} _, _, result = SecurityAdvisor.coach_end_user_command(client, args) assert result == RESPONSE_JSON def", "json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL, verify=False, proxy=False, headers=HEADERS ) args = {\"user\": \"<EMAIL>\",", "for test-module command Args: requests_mock ([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL +", "\"phishing\", \"message\": \"Coaching Sent\" } HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization':", "Sent\" } HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Token ' +", "SecurityAdvisor.coach_end_user_command(client, args) assert result == RESPONSE_JSON def test_module_command(requests_mock): \"\"\"Unit test for test-module command", "= { \"SecurityAdvisor.CoachUser\": { \"coaching_date\": 
\"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\":", "+ '<PASSWORD>' } def test_coach_end_user_command(requests_mock): \"\"\"Unit test for coach-end-user command Args: requests_mock ([type]):", "args) assert result == RESPONSE_JSON def test_module_command(requests_mock): \"\"\"Unit test for test-module command Args:", "\"phishing\"} _, _, result = SecurityAdvisor.coach_end_user_command(client, args) assert result == RESPONSE_JSON def test_module_command(requests_mock):", "SecurityAdvisor URL_SUFFIX = 'apis/coachuser/' BASE_URL = 'https://www.securityadvisor.io/' CONTEXT_JSON = { \"SecurityAdvisor.CoachUser\": { \"coaching_date\":", "Args: requests_mock ([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_reponse) client", "} } RESPONSE_JSON = { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\",", "[description] \"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_reponse) client = SecurityAdvisor.Client( base_url=BASE_URL,", "RESPONSE_JSON def test_module_command(requests_mock): \"\"\"Unit test for test-module command Args: requests_mock ([type]): [description] \"\"\"", "= SecurityAdvisor.coach_end_user_command(client, args) assert result == RESPONSE_JSON def test_module_command(requests_mock): \"\"\"Unit test for test-module", "\"\"\"Unit test for coach-end-user command Args: requests_mock ([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON", "args = {\"user\": \"<EMAIL>\", \"context\": \"phishing\"} _, _, result = SecurityAdvisor.coach_end_user_command(client, args) assert", "coach-end-user command Args: requests_mock ([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL + URL_SUFFIX,", "\"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\" } HEADERS =", "test for test-module command Args: requests_mock ([type]): [description] \"\"\" mock_reponse = RESPONSE_JSON requests_mock.post(BASE_URL", "= 'https://www.securityadvisor.io/' CONTEXT_JSON = { \"SecurityAdvisor.CoachUser\": { \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\",", "\"context\": \"phishing\", \"message\": \"Coaching Sent\" } HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json',", "test_coach_end_user_command(requests_mock): \"\"\"Unit test for coach-end-user command Args: requests_mock ([type]): [description] \"\"\" mock_reponse =", "{ \"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching", "\"coaching_date\": \"2019-10-04T21:04:19.480425\", \"coaching_status\": \"Pending\", \"coaching_score\": \"\", \"user\": \"<EMAIL>\", \"context\": \"phishing\", \"message\": \"Coaching Sent\"" ]
[ "# select only first 10 comments co['total_upvotes'] = groupby['upvotes'].transform('sum') co['total_comments'] = groupby['upvotes'].transform('count') co", "logger = logging.getLogger(__name__) def process_tokenized_text(): logger.info('Reading original comments data') comments = pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda", "to # rel_upvotes is the same for multiple values and it's not possible", "filter_by_rank(path_category_comments, category='politics', suffix='_fixed'): co = pd.read_csv(path_category_comments) co = _enhance_and_filter_comments(co) for top_bot_perc in [0.1,", "filter_by_rank(path, category='sport', suffix='_new') split_train_val_test(category='sport', suffix='_new') path = filter_comments_by_category(merged_path, category='politics') filter_by_rank(path, category='politics', suffix='_new') split_train_val_test(category='politics',", "all in a list and concat once comments = pd.concat(comments_list) logger.info('Storing filtered category", "return co def _split_and_label(enhanced_comments, top_bot_perc): logger.info(f'Label data for perc {top_bot_perc}') num_rows = co.shape[0]", "= f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting articles') articles = pd.read_csv(PATH_ARTICLES) articles = articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" + category +", "f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting articles') articles = pd.read_csv(PATH_ARTICLES) articles = articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" + category + \"/\")]", "co.groupby(\"rank\", group_keys=False) res_pos = groupby.apply(lambda g: g.nlargest(N, \"rel_upvotes\", keep=\"last\")) res_pos['class'] = 1 res_neg", "\"rel_upvotes\", keep=\"last\")) res_pos['class'] = 1 res_neg = groupby.apply(lambda g: g.nsmallest(N, \"rel_upvotes\", keep=\"first\")) if", "for category {category} and perc {p}') outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if not os.path.exists(outdir): os.mkdir(outdir)", "PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' # Output: PATH_MERGED_COMMENTS =", "res_neg = groupby.apply(lambda g: g.nsmallest(N, \"rel_upvotes\", keep=\"first\")) if (top_bot_perc == 0.5): # There", "+ \"/\")] # Read in chunkwise chunksize = 10 ** 6 comments_list =", "text], axis=1) del text # free memory comments.rename(inplace=True, index=str, columns={\"commen t_t ext\": \"comment_text\"})", "\"comment_text\"}) # the tokenized file has a strange col name comments.to_csv(PATH_MERGED_COMMENTS) def filter_comments_by_category(category='sport'):", "pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize): comment_chunk = chunk[chunk['comment_text'].notnull()] # filter out NaNs comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()] #", "t_t ext\": \"comment_text\"}) # the tokenized file has a strange col name comments.to_csv(PATH_MERGED_COMMENTS)", "Input: PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' # Output: PATH_MERGED_COMMENTS", "path_category_comments = f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting 
articles') articles = pd.read_csv(PATH_ARTICLES) articles = articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" + category", "of classes assert(res['comment_id'].is_unique) # make sure we don't have duplicates return res def", "Read in chunkwise chunksize = 10 ** 6 comments_list = [] logger.info('Reading merged", "result in 2 duplicates. Most likely due to # rel_upvotes is the same", "top_bot_perc in [0.1, 0.25, 0.5]: co = _split_and_label(co, top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def split_train_val_test(category='politics', suffix='_fixed'):", "is a problem when the want to bin the *whole* dataset. It would", "preprocessing steps all in one import logging import math import os import pandas", "co['total_comments'] = groupby['upvotes'].transform('count') co = co[co['total_upvotes'] > 20] # do not consider articles", "group_keys=False) res_pos = groupby.apply(lambda g: g.nlargest(N, \"rel_upvotes\", keep=\"last\")) res_pos['class'] = 1 res_neg =", "once comments = pd.concat(comments_list) logger.info('Storing filtered category comments') comments.to_csv(path_category_comments) return path_category_comments def _enhance_and_filter_comments(co):", "we don't have duplicates return res def filter_by_rank(path_category_comments, category='politics', suffix='_fixed'): co = pd.read_csv(path_category_comments)", "res = pd.concat([res_pos, res_neg]) assert(res[res['class'] == 0].shape[0] == res[res['class'] == 1].shape[0]) # ensure", "clear cut. res = pd.merge(co, res_pos, how='left') res['class'] = res['class'].fillna(0) else: res_neg['class'] =", "first 10 comments co['total_upvotes'] = groupby['upvotes'].transform('sum') co['total_comments'] = groupby['upvotes'].transform('count') co = co[co['total_upvotes'] >", "chunkwise chunksize = 10 ** 6 comments_list = [] logger.info('Reading merged comments chunkwise')", "train_test_split(not_train, test_size=0.5) # then, val: 0.1, test: 0.1 train.to_csv(outdir + '/train.csv') val.to_csv(outdir +", "comments.rename(inplace=True, index=str, columns={\"commen t_t ext\": \"comment_text\"}) # the tokenized file has a strange", "multiple values and it's not possible to have a clear cut. 
res =", "= pd.merge(co, res_pos, how='left') res['class'] = res['class'].fillna(0) else: res_neg['class'] = 0 res =", "articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" + category + \"/\")] # Read in chunkwise chunksize = 10 **", "= '/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS = 10 logger = logging.getLogger(__name__) def process_tokenized_text(): logger.info('Reading original comments", "= filter_comments_by_category(merged_path, category='sport') filter_by_rank(path, category='sport', suffix='_new') split_train_val_test(category='sport', suffix='_new') path = filter_comments_by_category(merged_path, category='politics') filter_by_rank(path,", "skip_blank_lines=False) assert comments.shape[0] == text.shape[0] logger.info('Merging tokenized text into comment data') comments =", "has a strange col name comments.to_csv(PATH_MERGED_COMMENTS) def filter_comments_by_category(category='sport'): # \"politics\" # Output: path_category_comments", "out article in category comment_chunk = comment_chunk.drop(['parent_comment_id'], axis=1) comments_list.append(comment_chunk) # it's faster to", "= groupby['timestamp'].rank(method='dense').astype(int) co = co[co['rank'] <= AMOUNT_OF_RANKS] # select only first 10 comments", "process_tokenized_text() path = filter_comments_by_category(merged_path, category='sport') filter_by_rank(path, category='sport', suffix='_new') split_train_val_test(category='sport', suffix='_new') path = filter_comments_by_category(merged_path,", "res_pos['class'] = 1 res_neg = groupby.apply(lambda g: g.nsmallest(N, \"rel_upvotes\", keep=\"first\")) if (top_bot_perc ==", "== 1].shape[0]) # ensure same number of classes assert(res['comment_id'].is_unique) # make sure we", "process_tokenized_text(): logger.info('Reading original comments data') comments = pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x: x != 'comment_text')", "filtered category comments') comments.to_csv(path_category_comments) return path_category_comments def _enhance_and_filter_comments(co): logger.info('Enhance comments with rank and", "1].shape[0]) # ensure same number of classes assert(res['comment_id'].is_unique) # make sure we don't", "= f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if not os.path.exists(outdir): os.mkdir(outdir) df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train, not_train = train_test_split(df,", "groupby = co.groupby('article_id') co['rank'] = groupby['timestamp'].rank(method='dense').astype(int) co = co[co['rank'] <= AMOUNT_OF_RANKS] # select", "filter out article in category comment_chunk = comment_chunk.drop(['parent_comment_id'], axis=1) comments_list.append(comment_chunk) # it's faster", "import os import pandas as pd import numpy as np from sklearn.model_selection import", "val and test set for category {category} and perc {p}') outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}'", "co = co[co['total_comments'] == 10] # remove articles with over under 10 comments.", "x: x != 'comment_text') logger.info('Reading tokenized text') text = pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False) assert comments.shape[0]", "it's not possible to have a clear cut. 
res = pd.merge(co, res_pos, how='left')", "the same for multiple values and it's not possible to have a clear", "split_train_val_test(category='politics', suffix='_fixed'): perc = ['0.1', '0.25', '0.5'] for p in perc: logger.info(f'Create train,", "res def filter_by_rank(path_category_comments, category='politics', suffix='_fixed'): co = pd.read_csv(path_category_comments) co = _enhance_and_filter_comments(co) for top_bot_perc", "val: 0.1, test: 0.1 train.to_csv(outdir + '/train.csv') val.to_csv(outdir + '/val.csv') test.to_csv(outdir + '/test.csv')", "comment_chunk = comment_chunk.drop(['parent_comment_id'], axis=1) comments_list.append(comment_chunk) # it's faster to first gather all in", "val, test = train_test_split(not_train, test_size=0.5) # then, val: 0.1, test: 0.1 train.to_csv(outdir +", "= [] logger.info('Reading merged comments chunkwise') for chunk in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize): comment_chunk =", "chunksize = 10 ** 6 comments_list = [] logger.info('Reading merged comments chunkwise') for", "would result in 2 duplicates. Most likely due to # rel_upvotes is the", "select root comments comment_chunk = comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter out article in category comment_chunk", "not possible to have a clear cut. res = pd.merge(co, res_pos, how='left') res['class']", "row.total_upvotes) * 100, axis=1) return co def _split_and_label(enhanced_comments, top_bot_perc): logger.info(f'Label data for perc", "math import os import pandas as pd import numpy as np from sklearn.model_selection", "comments. co['rel_upvotes'] = co.apply(lambda row: (row.upvotes / row.total_upvotes) * 100, axis=1) return co", "filter out NaNs comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only select root comments comment_chunk =", "apply filtering') groupby = co.groupby('article_id') co['rank'] = groupby['timestamp'].rank(method='dense').astype(int) co = co[co['rank'] <= AMOUNT_OF_RANKS]", "6 comments_list = [] logger.info('Reading merged comments chunkwise') for chunk in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize):", "for p in perc: logger.info(f'Create train, val and test set for category {category}", "comments') comments.to_csv(path_category_comments) return path_category_comments def _enhance_and_filter_comments(co): logger.info('Enhance comments with rank and apply filtering')", "_enhance_and_filter_comments(co): logger.info('Enhance comments with rank and apply filtering') groupby = co.groupby('article_id') co['rank'] =", "= groupby.apply(lambda g: g.nlargest(N, \"rel_upvotes\", keep=\"last\")) res_pos['class'] = 1 res_neg = groupby.apply(lambda g:", "res['class'] = res['class'].fillna(0) else: res_neg['class'] = 0 res = pd.concat([res_pos, res_neg]) assert(res[res['class'] ==", "# then, val: 0.1, test: 0.1 train.to_csv(outdir + '/train.csv') val.to_csv(outdir + '/val.csv') test.to_csv(outdir", "logging import math import os import pandas as pd import numpy as np", "comments with rank and apply filtering') groupby = co.groupby('article_id') co['rank'] = groupby['timestamp'].rank(method='dense').astype(int) co", "suffix='_fixed'): perc = ['0.1', '0.25', '0.5'] for p in perc: logger.info(f'Create train, val", "= 1 res_neg = groupby.apply(lambda g: g.nsmallest(N, \"rel_upvotes\", keep=\"first\")) if (top_bot_perc == 0.5):", "out NaNs comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only select root comments comment_chunk = 
comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])]", "# Output: PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS = 10 logger = logging.getLogger(__name__) def process_tokenized_text():", "data for perc {top_bot_perc}') num_rows = co.shape[0] N = int((num_rows * top_bot_perc) /", "= _enhance_and_filter_comments(co) for top_bot_perc in [0.1, 0.25, 0.5]: co = _split_and_label(co, top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\")", "merged comments chunkwise') for chunk in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize): comment_chunk = chunk[chunk['comment_text'].notnull()] # filter", "co.groupby('article_id') co['rank'] = groupby['timestamp'].rank(method='dense').astype(int) co = co[co['rank'] <= AMOUNT_OF_RANKS] # select only first", "test_size=0.5) # then, val: 0.1, test: 0.1 train.to_csv(outdir + '/train.csv') val.to_csv(outdir + '/val.csv')", "* top_bot_perc) / AMOUNT_OF_RANKS) groupby = co.groupby(\"rank\", group_keys=False) res_pos = groupby.apply(lambda g: g.nlargest(N,", "chunk in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize): comment_chunk = chunk[chunk['comment_text'].notnull()] # filter out NaNs comment_chunk =", "perc {top_bot_perc}') num_rows = co.shape[0] N = int((num_rows * top_bot_perc) / AMOUNT_OF_RANKS) groupby", "category comments') comments.to_csv(path_category_comments) return path_category_comments def _enhance_and_filter_comments(co): logger.info('Enhance comments with rank and apply", "rank and apply filtering') groupby = co.groupby('article_id') co['rank'] = groupby['timestamp'].rank(method='dense').astype(int) co = co[co['rank']", "0.1, test: 0.1 train.to_csv(outdir + '/train.csv') val.to_csv(outdir + '/val.csv') test.to_csv(outdir + '/test.csv') if", "tokenized text into comment data') comments = pd.concat([comments, text], axis=1) del text #", "root comments comment_chunk = comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter out article in category comment_chunk =", "in 2 duplicates. Most likely due to # rel_upvotes is the same for", "= pd.read_csv(PATH_ARTICLES) articles = articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" + category + \"/\")] # Read in chunkwise", "= co.apply(lambda row: (row.upvotes / row.total_upvotes) * 100, axis=1) return co def _split_and_label(enhanced_comments,", "category comment_chunk = comment_chunk.drop(['parent_comment_id'], axis=1) comments_list.append(comment_chunk) # it's faster to first gather all", "= train_test_split(df, test_size=0.2) # first train: 0.8 val, test = train_test_split(not_train, test_size=0.5) #", "have a clear cut. res = pd.merge(co, res_pos, how='left') res['class'] = res['class'].fillna(0) else:", "to bin the *whole* dataset. It would result in 2 duplicates. 
Most likely", "== text.shape[0] logger.info('Merging tokenized text into comment data') comments = pd.concat([comments, text], axis=1)", "res['class'].fillna(0) else: res_neg['class'] = 0 res = pd.concat([res_pos, res_neg]) assert(res[res['class'] == 0].shape[0] ==", "under 10 upvotes co = co[co['total_comments'] == 10] # remove articles with over", "# The preprocessing steps all in one import logging import math import os", "co = pd.read_csv(path_category_comments) co = _enhance_and_filter_comments(co) for top_bot_perc in [0.1, 0.25, 0.5]: co", "groupby.apply(lambda g: g.nsmallest(N, \"rel_upvotes\", keep=\"first\")) if (top_bot_perc == 0.5): # There is a", "top_bot_perc) / AMOUNT_OF_RANKS) groupby = co.groupby(\"rank\", group_keys=False) res_pos = groupby.apply(lambda g: g.nlargest(N, \"rel_upvotes\",", "in perc: logger.info(f'Create train, val and test set for category {category} and perc", "merged_path = process_tokenized_text() path = filter_comments_by_category(merged_path, category='sport') filter_by_rank(path, category='sport', suffix='_new') split_train_val_test(category='sport', suffix='_new') path", "comments.to_csv(path_category_comments) return path_category_comments def _enhance_and_filter_comments(co): logger.info('Enhance comments with rank and apply filtering') groupby", "memory comments.rename(inplace=True, index=str, columns={\"commen t_t ext\": \"comment_text\"}) # the tokenized file has a", "100, axis=1) return co def _split_and_label(enhanced_comments, top_bot_perc): logger.info(f'Label data for perc {top_bot_perc}') num_rows", "'/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' # Output: PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS = 10 logger = logging.getLogger(__name__) def", "res[res['class'] == 1].shape[0]) # ensure same number of classes assert(res['comment_id'].is_unique) # make sure", "# Output: path_category_comments = f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting articles') articles = pd.read_csv(PATH_ARTICLES) articles = articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\"", "10 comments co['total_upvotes'] = groupby['upvotes'].transform('sum') co['total_comments'] = groupby['upvotes'].transform('count') co = co[co['total_upvotes'] > 20]", "# it's faster to first gather all in a list and concat once", "PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' # Output: PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS = 10 logger =", "co def _split_and_label(enhanced_comments, top_bot_perc): logger.info(f'Label data for perc {top_bot_perc}') num_rows = co.shape[0] N", "chunksize=chunksize): comment_chunk = chunk[chunk['comment_text'].notnull()] # filter out NaNs comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only", "free memory comments.rename(inplace=True, index=str, columns={\"commen t_t ext\": \"comment_text\"}) # the tokenized file has", "int((num_rows * top_bot_perc) / AMOUNT_OF_RANKS) groupby = co.groupby(\"rank\", group_keys=False) res_pos = groupby.apply(lambda g:", "train: 0.8 val, test = train_test_split(not_train, test_size=0.5) # then, val: 0.1, test: 0.1", "= process_tokenized_text() path = filter_comments_by_category(merged_path, category='sport') filter_by_rank(path, category='sport', suffix='_new') split_train_val_test(category='sport', suffix='_new') path =", "values and it's not 
possible to have a clear cut. res = pd.merge(co,", "logger.info('Collecting articles') articles = pd.read_csv(PATH_ARTICLES) articles = articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" + category + \"/\")] #", "pd.merge(co, res_pos, how='left') res['class'] = res['class'].fillna(0) else: res_neg['class'] = 0 res = pd.concat([res_pos,", "AMOUNT_OF_RANKS) groupby = co.groupby(\"rank\", group_keys=False) res_pos = groupby.apply(lambda g: g.nlargest(N, \"rel_upvotes\", keep=\"last\")) res_pos['class']", "** 6 comments_list = [] logger.info('Reading merged comments chunkwise') for chunk in pd.read_csv(PATH_MERGED_COMMENTS,", "10] # remove articles with over under 10 comments. co['rel_upvotes'] = co.apply(lambda row:", "import train_test_split # Input: PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower'", "comments_list = [] logger.info('Reading merged comments chunkwise') for chunk in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize): comment_chunk", "= 10 logger = logging.getLogger(__name__) def process_tokenized_text(): logger.info('Reading original comments data') comments =", "= '/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' # Output: PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv'", "os import pandas as pd import numpy as np from sklearn.model_selection import train_test_split", "pd.concat([res_pos, res_neg]) assert(res[res['class'] == 0].shape[0] == res[res['class'] == 1].shape[0]) # ensure same number", "upvotes co = co[co['total_comments'] == 10] # remove articles with over under 10", "axis=1) del text # free memory comments.rename(inplace=True, index=str, columns={\"commen t_t ext\": \"comment_text\"}) #", "AMOUNT_OF_RANKS = 10 logger = logging.getLogger(__name__) def process_tokenized_text(): logger.info('Reading original comments data') comments", "duplicates. 
Most likely due to # rel_upvotes is the same for multiple values", "the tokenized file has a strange col name comments.to_csv(PATH_MERGED_COMMENTS) def filter_comments_by_category(category='sport'): # \"politics\"", "consider articles with under 10 upvotes co = co[co['total_comments'] == 10] # remove", "co.shape[0] N = int((num_rows * top_bot_perc) / AMOUNT_OF_RANKS) groupby = co.groupby(\"rank\", group_keys=False) res_pos", "how='left') res['class'] = res['class'].fillna(0) else: res_neg['class'] = 0 res = pd.concat([res_pos, res_neg]) assert(res[res['class']", "in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize): comment_chunk = chunk[chunk['comment_text'].notnull()] # filter out NaNs comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()]", "= co.shape[0] N = int((num_rows * top_bot_perc) / AMOUNT_OF_RANKS) groupby = co.groupby(\"rank\", group_keys=False)", "0].shape[0] == res[res['class'] == 1].shape[0]) # ensure same number of classes assert(res['comment_id'].is_unique) #", "and perc {p}') outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if not os.path.exists(outdir): os.mkdir(outdir) df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv')", "a list and concat once comments = pd.concat(comments_list) logger.info('Storing filtered category comments') comments.to_csv(path_category_comments)", "perc: logger.info(f'Create train, val and test set for category {category} and perc {p}')", "> 20] # do not consider articles with under 10 upvotes co =", "keep=\"first\")) if (top_bot_perc == 0.5): # There is a problem when the want", "for perc {top_bot_perc}') num_rows = co.shape[0] N = int((num_rows * top_bot_perc) / AMOUNT_OF_RANKS)", "import numpy as np from sklearn.model_selection import train_test_split # Input: PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv'", "{top_bot_perc}') num_rows = co.shape[0] N = int((num_rows * top_bot_perc) / AMOUNT_OF_RANKS) groupby =", "comments.to_csv(PATH_MERGED_COMMENTS) def filter_comments_by_category(category='sport'): # \"politics\" # Output: path_category_comments = f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting articles') articles", "do not consider articles with under 10 upvotes co = co[co['total_comments'] == 10]", "# make sure we don't have duplicates return res def filter_by_rank(path_category_comments, category='politics', suffix='_fixed'):", "logging.getLogger(__name__) def process_tokenized_text(): logger.info('Reading original comments data') comments = pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x: x", "= pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False) assert comments.shape[0] == text.shape[0] logger.info('Merging tokenized text into comment data')", "Output: path_category_comments = f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting articles') articles = pd.read_csv(PATH_ARTICLES) articles = articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" +", "comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only select root comments comment_chunk = comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter out article", "def process_tokenized_text(): logger.info('Reading original comments data') comments = pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x: x !=", "pd.read_csv(path_category_comments) co = _enhance_and_filter_comments(co) for top_bot_perc in [0.1, 0.25, 0.5]: co = 
_split_and_label(co,", "# rel_upvotes is the same for multiple values and it's not possible to", "else: res_neg['class'] = 0 res = pd.concat([res_pos, res_neg]) assert(res[res['class'] == 0].shape[0] == res[res['class']", "usecols=lambda x: x != 'comment_text') logger.info('Reading tokenized text') text = pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False) assert", "text.shape[0] logger.info('Merging tokenized text into comment data') comments = pd.concat([comments, text], axis=1) del", "\"rel_upvotes\", keep=\"first\")) if (top_bot_perc == 0.5): # There is a problem when the", "os.path.exists(outdir): os.mkdir(outdir) df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train, not_train = train_test_split(df, test_size=0.2) # first train:", "co = _split_and_label(co, top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def split_train_val_test(category='politics', suffix='_fixed'): perc = ['0.1', '0.25', '0.5']", "assert comments.shape[0] == text.shape[0] logger.info('Merging tokenized text into comment data') comments = pd.concat([comments,", "# only select root comments comment_chunk = comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter out article in", "# filter out NaNs comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only select root comments comment_chunk", "axis=1) return co def _split_and_label(enhanced_comments, top_bot_perc): logger.info(f'Label data for perc {top_bot_perc}') num_rows =", "comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only select root comments comment_chunk = comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter", "20] # do not consider articles with under 10 upvotes co = co[co['total_comments']", "if __name__ == \"__main__\": merged_path = process_tokenized_text() path = filter_comments_by_category(merged_path, category='sport') filter_by_rank(path, category='sport',", "articles with under 10 upvotes co = co[co['total_comments'] == 10] # remove articles", "res_pos, how='left') res['class'] = res['class'].fillna(0) else: res_neg['class'] = 0 res = pd.concat([res_pos, res_neg])", "pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False) assert comments.shape[0] == text.shape[0] logger.info('Merging tokenized text into comment data') comments", "not consider articles with under 10 upvotes co = co[co['total_comments'] == 10] #", "data') comments = pd.concat([comments, text], axis=1) del text # free memory comments.rename(inplace=True, index=str,", "= groupby['upvotes'].transform('count') co = co[co['total_upvotes'] > 20] # do not consider articles with", "= pd.read_csv(path_category_comments) co = _enhance_and_filter_comments(co) for top_bot_perc in [0.1, 0.25, 0.5]: co =", "== 10] # remove articles with over under 10 comments. co['rel_upvotes'] = co.apply(lambda", "chunkwise') for chunk in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize): comment_chunk = chunk[chunk['comment_text'].notnull()] # filter out NaNs", "when the want to bin the *whole* dataset. 
It would result in 2", "make sure we don't have duplicates return res def filter_by_rank(path_category_comments, category='politics', suffix='_fixed'): co", "category + \"/\")] # Read in chunkwise chunksize = 10 ** 6 comments_list", "steps all in one import logging import math import os import pandas as", "NaNs comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only select root comments comment_chunk = comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] #", "_split_and_label(enhanced_comments, top_bot_perc): logger.info(f'Label data for perc {top_bot_perc}') num_rows = co.shape[0] N = int((num_rows", "articles = articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" + category + \"/\")] # Read in chunkwise chunksize =", "only select root comments comment_chunk = comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter out article in category", "(top_bot_perc == 0.5): # There is a problem when the want to bin", "logger.info(f'Create train, val and test set for category {category} and perc {p}') outdir", "'comment_text') logger.info('Reading tokenized text') text = pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False) assert comments.shape[0] == text.shape[0] logger.info('Merging", "co['total_upvotes'] = groupby['upvotes'].transform('sum') co['total_comments'] = groupby['upvotes'].transform('count') co = co[co['total_upvotes'] > 20] # do", "for top_bot_perc in [0.1, 0.25, 0.5]: co = _split_and_label(co, top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def split_train_val_test(category='politics',", "and concat once comments = pd.concat(comments_list) logger.info('Storing filtered category comments') comments.to_csv(path_category_comments) return path_category_comments", "= ['0.1', '0.25', '0.5'] for p in perc: logger.info(f'Create train, val and test", "want to bin the *whole* dataset. It would result in 2 duplicates. 
Most", "= int((num_rows * top_bot_perc) / AMOUNT_OF_RANKS) groupby = co.groupby(\"rank\", group_keys=False) res_pos = groupby.apply(lambda", "comments comment_chunk = comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter out article in category comment_chunk = comment_chunk.drop(['parent_comment_id'],", "res_pos = groupby.apply(lambda g: g.nlargest(N, \"rel_upvotes\", keep=\"last\")) res_pos['class'] = 1 res_neg = groupby.apply(lambda", "text') text = pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False) assert comments.shape[0] == text.shape[0] logger.info('Merging tokenized text into", "first train: 0.8 val, test = train_test_split(not_train, test_size=0.5) # then, val: 0.1, test:", "train, not_train = train_test_split(df, test_size=0.2) # first train: 0.8 val, test = train_test_split(not_train,", "train_test_split # Input: PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' #", "select only first 10 comments co['total_upvotes'] = groupby['upvotes'].transform('sum') co['total_comments'] = groupby['upvotes'].transform('count') co =", "ext\": \"comment_text\"}) # the tokenized file has a strange col name comments.to_csv(PATH_MERGED_COMMENTS) def", "in category comment_chunk = comment_chunk.drop(['parent_comment_id'], axis=1) comments_list.append(comment_chunk) # it's faster to first gather", "res = pd.merge(co, res_pos, how='left') res['class'] = res['class'].fillna(0) else: res_neg['class'] = 0 res", "test = train_test_split(not_train, test_size=0.5) # then, val: 0.1, test: 0.1 train.to_csv(outdir + '/train.csv')", "def _enhance_and_filter_comments(co): logger.info('Enhance comments with rank and apply filtering') groupby = co.groupby('article_id') co['rank']", "= _split_and_label(co, top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def split_train_val_test(category='politics', suffix='_fixed'): perc = ['0.1', '0.25', '0.5'] for", "groupby = co.groupby(\"rank\", group_keys=False) res_pos = groupby.apply(lambda g: g.nlargest(N, \"rel_upvotes\", keep=\"last\")) res_pos['class'] =", "comments data') comments = pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x: x != 'comment_text') logger.info('Reading tokenized text')", "# ensure same number of classes assert(res['comment_id'].is_unique) # make sure we don't have", "all in one import logging import math import os import pandas as pd", "first gather all in a list and concat once comments = pd.concat(comments_list) logger.info('Storing", "'/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS = 10 logger = logging.getLogger(__name__) def process_tokenized_text(): logger.info('Reading original comments data')", "g: g.nsmallest(N, \"rel_upvotes\", keep=\"first\")) if (top_bot_perc == 0.5): # There is a problem", "ensure same number of classes assert(res['comment_id'].is_unique) # make sure we don't have duplicates", "= groupby['upvotes'].transform('sum') co['total_comments'] = groupby['upvotes'].transform('count') co = co[co['total_upvotes'] > 20] # do not", "<gh_stars>0 # The preprocessing steps all in one import logging import math import", "same for multiple values and it's not possible to have a clear cut.", "train, val and test set for category {category} and perc {p}') outdir =", "10 logger = logging.getLogger(__name__) def 
process_tokenized_text(): logger.info('Reading original comments data') comments = pd.read_csv(PATH_ORIG_COMMENTS,", "it's faster to first gather all in a list and concat once comments", "the *whole* dataset. It would result in 2 duplicates. Most likely due to", "articles with over under 10 comments. co['rel_upvotes'] = co.apply(lambda row: (row.upvotes / row.total_upvotes)", "res_neg['class'] = 0 res = pd.concat([res_pos, res_neg]) assert(res[res['class'] == 0].shape[0] == res[res['class'] ==", "name comments.to_csv(PATH_MERGED_COMMENTS) def filter_comments_by_category(category='sport'): # \"politics\" # Output: path_category_comments = f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting articles')", "with under 10 upvotes co = co[co['total_comments'] == 10] # remove articles with", "= pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train, not_train = train_test_split(df, test_size=0.2) # first train: 0.8 val, test", "strange col name comments.to_csv(PATH_MERGED_COMMENTS) def filter_comments_by_category(category='sport'): # \"politics\" # Output: path_category_comments = f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\"", "test_size=0.2) # first train: 0.8 val, test = train_test_split(not_train, test_size=0.5) # then, val:", "= articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" + category + \"/\")] # Read in chunkwise chunksize = 10", "co[co['rank'] <= AMOUNT_OF_RANKS] # select only first 10 comments co['total_upvotes'] = groupby['upvotes'].transform('sum') co['total_comments']", "N = int((num_rows * top_bot_perc) / AMOUNT_OF_RANKS) groupby = co.groupby(\"rank\", group_keys=False) res_pos =", "= groupby.apply(lambda g: g.nsmallest(N, \"rel_upvotes\", keep=\"first\")) if (top_bot_perc == 0.5): # There is", "+ '/val.csv') test.to_csv(outdir + '/test.csv') if __name__ == \"__main__\": merged_path = process_tokenized_text() path", "comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter out article in category comment_chunk = comment_chunk.drop(['parent_comment_id'], axis=1) comments_list.append(comment_chunk) #", "It would result in 2 duplicates. 
Most likely due to # rel_upvotes is", "comment data') comments = pd.concat([comments, text], axis=1) del text # free memory comments.rename(inplace=True,", "in chunkwise chunksize = 10 ** 6 comments_list = [] logger.info('Reading merged comments", "= 10 ** 6 comments_list = [] logger.info('Reading merged comments chunkwise') for chunk", "def filter_by_rank(path_category_comments, category='politics', suffix='_fixed'): co = pd.read_csv(path_category_comments) co = _enhance_and_filter_comments(co) for top_bot_perc in", "not_train = train_test_split(df, test_size=0.2) # first train: 0.8 val, test = train_test_split(not_train, test_size=0.5)", "groupby['upvotes'].transform('sum') co['total_comments'] = groupby['upvotes'].transform('count') co = co[co['total_upvotes'] > 20] # do not consider", "import math import os import pandas as pd import numpy as np from", "set for category {category} and perc {p}') outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if not os.path.exists(outdir):", "+ category + \"/\")] # Read in chunkwise chunksize = 10 ** 6", "+ '/test.csv') if __name__ == \"__main__\": merged_path = process_tokenized_text() path = filter_comments_by_category(merged_path, category='sport')", "= pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x: x != 'comment_text') logger.info('Reading tokenized text') text = pd.read_table(PATH_TOKENIZED_TEXT,", "as np from sklearn.model_selection import train_test_split # Input: PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS =", "= comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only select root comments comment_chunk = comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter out", "logger.info(f'Label data for perc {top_bot_perc}') num_rows = co.shape[0] N = int((num_rows * top_bot_perc)", "only first 10 comments co['total_upvotes'] = groupby['upvotes'].transform('sum') co['total_comments'] = groupby['upvotes'].transform('count') co = co[co['total_upvotes']", "a problem when the want to bin the *whole* dataset. It would result", "Output: PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS = 10 logger = logging.getLogger(__name__) def process_tokenized_text(): logger.info('Reading", "# free memory comments.rename(inplace=True, index=str, columns={\"commen t_t ext\": \"comment_text\"}) # the tokenized file", "/ AMOUNT_OF_RANKS) groupby = co.groupby(\"rank\", group_keys=False) res_pos = groupby.apply(lambda g: g.nlargest(N, \"rel_upvotes\", keep=\"last\"))", "comments co['total_upvotes'] = groupby['upvotes'].transform('sum') co['total_comments'] = groupby['upvotes'].transform('count') co = co[co['total_upvotes'] > 20] #", "/ row.total_upvotes) * 100, axis=1) return co def _split_and_label(enhanced_comments, top_bot_perc): logger.info(f'Label data for", "test.to_csv(outdir + '/test.csv') if __name__ == \"__main__\": merged_path = process_tokenized_text() path = filter_comments_by_category(merged_path,", "under 10 comments. 
co['rel_upvotes'] = co.apply(lambda row: (row.upvotes / row.total_upvotes) * 100, axis=1)", "'/val.csv') test.to_csv(outdir + '/test.csv') if __name__ == \"__main__\": merged_path = process_tokenized_text() path =", "not os.path.exists(outdir): os.mkdir(outdir) df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train, not_train = train_test_split(df, test_size=0.2) # first", "filter_comments_by_category(merged_path, category='sport') filter_by_rank(path, category='sport', suffix='_new') split_train_val_test(category='sport', suffix='_new') path = filter_comments_by_category(merged_path, category='politics') filter_by_rank(path, category='politics',", "There is a problem when the want to bin the *whole* dataset. It", "# There is a problem when the want to bin the *whole* dataset.", "with rank and apply filtering') groupby = co.groupby('article_id') co['rank'] = groupby['timestamp'].rank(method='dense').astype(int) co =", "* 100, axis=1) return co def _split_and_label(enhanced_comments, top_bot_perc): logger.info(f'Label data for perc {top_bot_perc}')", "comment_chunk = chunk[chunk['comment_text'].notnull()] # filter out NaNs comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only select", "concat once comments = pd.concat(comments_list) logger.info('Storing filtered category comments') comments.to_csv(path_category_comments) return path_category_comments def", "a clear cut. res = pd.merge(co, res_pos, how='left') res['class'] = res['class'].fillna(0) else: res_neg['class']", "data') comments = pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x: x != 'comment_text') logger.info('Reading tokenized text') text", "comments = pd.concat([comments, text], axis=1) del text # free memory comments.rename(inplace=True, index=str, columns={\"commen", "filter_comments_by_category(category='sport'): # \"politics\" # Output: path_category_comments = f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting articles') articles = pd.read_csv(PATH_ARTICLES)", "<= AMOUNT_OF_RANKS] # select only first 10 comments co['total_upvotes'] = groupby['upvotes'].transform('sum') co['total_comments'] =", "# Input: PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' # Output:", "import pandas as pd import numpy as np from sklearn.model_selection import train_test_split #", "= co[co['rank'] <= AMOUNT_OF_RANKS] # select only first 10 comments co['total_upvotes'] = groupby['upvotes'].transform('sum')", "for multiple values and it's not possible to have a clear cut. res", "= train_test_split(not_train, test_size=0.5) # then, val: 0.1, test: 0.1 train.to_csv(outdir + '/train.csv') val.to_csv(outdir", "problem when the want to bin the *whole* dataset. 
It would result in", "have duplicates return res def filter_by_rank(path_category_comments, category='politics', suffix='_fixed'): co = pd.read_csv(path_category_comments) co =", "np from sklearn.model_selection import train_test_split # Input: PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv'", "AMOUNT_OF_RANKS] # select only first 10 comments co['total_upvotes'] = groupby['upvotes'].transform('sum') co['total_comments'] = groupby['upvotes'].transform('count')", "suffix='_fixed'): co = pd.read_csv(path_category_comments) co = _enhance_and_filter_comments(co) for top_bot_perc in [0.1, 0.25, 0.5]:", "original comments data') comments = pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x: x != 'comment_text') logger.info('Reading tokenized", "= chunk[chunk['comment_text'].notnull()] # filter out NaNs comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only select root", "== res[res['class'] == 1].shape[0]) # ensure same number of classes assert(res['comment_id'].is_unique) # make", "row: (row.upvotes / row.total_upvotes) * 100, axis=1) return co def _split_and_label(enhanced_comments, top_bot_perc): logger.info(f'Label", "return res def filter_by_rank(path_category_comments, category='politics', suffix='_fixed'): co = pd.read_csv(path_category_comments) co = _enhance_and_filter_comments(co) for", "co = _enhance_and_filter_comments(co) for top_bot_perc in [0.1, 0.25, 0.5]: co = _split_and_label(co, top_bot_perc)", "0.5]: co = _split_and_label(co, top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def split_train_val_test(category='politics', suffix='_fixed'): perc = ['0.1', '0.25',", "The preprocessing steps all in one import logging import math import os import", "columns={\"commen t_t ext\": \"comment_text\"}) # the tokenized file has a strange col name", "due to # rel_upvotes is the same for multiple values and it's not", "# \"politics\" # Output: path_category_comments = f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting articles') articles = pd.read_csv(PATH_ARTICLES) articles", "articles') articles = pd.read_csv(PATH_ARTICLES) articles = articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" + category + \"/\")] # Read", "same number of classes assert(res['comment_id'].is_unique) # make sure we don't have duplicates return", "'0.5'] for p in perc: logger.info(f'Create train, val and test set for category", "comments = pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x: x != 'comment_text') logger.info('Reading tokenized text') text =", "pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train, not_train = train_test_split(df, test_size=0.2) # first train: 0.8 val, test =", "\"politics\" # Output: path_category_comments = f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting articles') articles = pd.read_csv(PATH_ARTICLES) articles =", "PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' # Output: PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS =", "and apply filtering') groupby = co.groupby('article_id') co['rank'] = groupby['timestamp'].rank(method='dense').astype(int) co = co[co['rank'] <=", 
"'/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' # Output: PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS", "co[co['total_comments'] == 10] # remove articles with over under 10 comments. co['rel_upvotes'] =", "pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x: x != 'comment_text') logger.info('Reading tokenized text') text = pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False)", "co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def split_train_val_test(category='politics', suffix='_fixed'): perc = ['0.1', '0.25', '0.5'] for p in perc:", "train.to_csv(outdir + '/train.csv') val.to_csv(outdir + '/val.csv') test.to_csv(outdir + '/test.csv') if __name__ == \"__main__\":", "10 comments. co['rel_upvotes'] = co.apply(lambda row: (row.upvotes / row.total_upvotes) * 100, axis=1) return", "\"/\")] # Read in chunkwise chunksize = 10 ** 6 comments_list = []", "if (top_bot_perc == 0.5): # There is a problem when the want to", "g: g.nlargest(N, \"rel_upvotes\", keep=\"last\")) res_pos['class'] = 1 res_neg = groupby.apply(lambda g: g.nsmallest(N, \"rel_upvotes\",", "is the same for multiple values and it's not possible to have a", "== 0].shape[0] == res[res['class'] == 1].shape[0]) # ensure same number of classes assert(res['comment_id'].is_unique)", "logger.info('Merging tokenized text into comment data') comments = pd.concat([comments, text], axis=1) del text", "logger.info('Enhance comments with rank and apply filtering') groupby = co.groupby('article_id') co['rank'] = groupby['timestamp'].rank(method='dense').astype(int)", "category='politics', suffix='_fixed'): co = pd.read_csv(path_category_comments) co = _enhance_and_filter_comments(co) for top_bot_perc in [0.1, 0.25,", "faster to first gather all in a list and concat once comments =", "0.1 train.to_csv(outdir + '/train.csv') val.to_csv(outdir + '/val.csv') test.to_csv(outdir + '/test.csv') if __name__ ==", "0.5): # There is a problem when the want to bin the *whole*", "= co.groupby(\"rank\", group_keys=False) res_pos = groupby.apply(lambda g: g.nlargest(N, \"rel_upvotes\", keep=\"last\")) res_pos['class'] = 1", "classes assert(res['comment_id'].is_unique) # make sure we don't have duplicates return res def filter_by_rank(path_category_comments,", "co[co['total_upvotes'] > 20] # do not consider articles with under 10 upvotes co", "top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def split_train_val_test(category='politics', suffix='_fixed'): perc = ['0.1', '0.25', '0.5'] for p in", "index=str, columns={\"commen t_t ext\": \"comment_text\"}) # the tokenized file has a strange col", "a strange col name comments.to_csv(PATH_MERGED_COMMENTS) def filter_comments_by_category(category='sport'): # \"politics\" # Output: path_category_comments =", "category='sport') filter_by_rank(path, category='sport', suffix='_new') split_train_val_test(category='sport', suffix='_new') path = filter_comments_by_category(merged_path, category='politics') filter_by_rank(path, category='politics', suffix='_new')", "= co.groupby('article_id') co['rank'] = groupby['timestamp'].rank(method='dense').astype(int) co = co[co['rank'] <= AMOUNT_OF_RANKS] # select only", "return path_category_comments def 
_enhance_and_filter_comments(co): logger.info('Enhance comments with rank and apply filtering') groupby =", "['0.1', '0.25', '0.5'] for p in perc: logger.info(f'Create train, val and test set", "with over under 10 comments. co['rel_upvotes'] = co.apply(lambda row: (row.upvotes / row.total_upvotes) *", "'0.25', '0.5'] for p in perc: logger.info(f'Create train, val and test set for", "_split_and_label(co, top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def split_train_val_test(category='politics', suffix='_fixed'): perc = ['0.1', '0.25', '0.5'] for p", "likely due to # rel_upvotes is the same for multiple values and it's", "co['rel_upvotes'] = co.apply(lambda row: (row.upvotes / row.total_upvotes) * 100, axis=1) return co def", "top_bot_perc): logger.info(f'Label data for perc {top_bot_perc}') num_rows = co.shape[0] N = int((num_rows *", "tokenized file has a strange col name comments.to_csv(PATH_MERGED_COMMENTS) def filter_comments_by_category(category='sport'): # \"politics\" #", "= comment_chunk.drop(['parent_comment_id'], axis=1) comments_list.append(comment_chunk) # it's faster to first gather all in a", "to have a clear cut. res = pd.merge(co, res_pos, how='left') res['class'] = res['class'].fillna(0)", "logger.info('Reading tokenized text') text = pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False) assert comments.shape[0] == text.shape[0] logger.info('Merging tokenized", "groupby['timestamp'].rank(method='dense').astype(int) co = co[co['rank'] <= AMOUNT_OF_RANKS] # select only first 10 comments co['total_upvotes']", "f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if not os.path.exists(outdir): os.mkdir(outdir) df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train, not_train = train_test_split(df, test_size=0.2)", "comment_chunk = comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter out article in category comment_chunk = comment_chunk.drop(['parent_comment_id'], axis=1)", "possible to have a clear cut. res = pd.merge(co, res_pos, how='left') res['class'] =", "= pd.concat(comments_list) logger.info('Storing filtered category comments') comments.to_csv(path_category_comments) return path_category_comments def _enhance_and_filter_comments(co): logger.info('Enhance comments", "category {category} and perc {p}') outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if not os.path.exists(outdir): os.mkdir(outdir) df", "# first train: 0.8 val, test = train_test_split(not_train, test_size=0.5) # then, val: 0.1,", "# remove articles with over under 10 comments. 
co['rel_upvotes'] = co.apply(lambda row: (row.upvotes", "'/test.csv') if __name__ == \"__main__\": merged_path = process_tokenized_text() path = filter_comments_by_category(merged_path, category='sport') filter_by_rank(path,", "0.25, 0.5]: co = _split_and_label(co, top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def split_train_val_test(category='politics', suffix='_fixed'): perc = ['0.1',", "and test set for category {category} and perc {p}') outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if", "def filter_comments_by_category(category='sport'): # \"politics\" # Output: path_category_comments = f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting articles') articles =", "axis=1) comments_list.append(comment_chunk) # it's faster to first gather all in a list and", "== \"__main__\": merged_path = process_tokenized_text() path = filter_comments_by_category(merged_path, category='sport') filter_by_rank(path, category='sport', suffix='_new') split_train_val_test(category='sport',", "article in category comment_chunk = comment_chunk.drop(['parent_comment_id'], axis=1) comments_list.append(comment_chunk) # it's faster to first", "os.mkdir(outdir) df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train, not_train = train_test_split(df, test_size=0.2) # first train: 0.8", "in one import logging import math import os import pandas as pd import", "= '/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' # Output: PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS = 10", "logger.info('Reading original comments data') comments = pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x: x != 'comment_text') logger.info('Reading", "text = pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False) assert comments.shape[0] == text.shape[0] logger.info('Merging tokenized text into comment", "assert(res['comment_id'].is_unique) # make sure we don't have duplicates return res def filter_by_rank(path_category_comments, category='politics',", "in a list and concat once comments = pd.concat(comments_list) logger.info('Storing filtered category comments')", "g.nlargest(N, \"rel_upvotes\", keep=\"last\")) res_pos['class'] = 1 res_neg = groupby.apply(lambda g: g.nsmallest(N, \"rel_upvotes\", keep=\"first\"))", "[] logger.info('Reading merged comments chunkwise') for chunk in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize): comment_chunk = chunk[chunk['comment_text'].notnull()]", "__name__ == \"__main__\": merged_path = process_tokenized_text() path = filter_comments_by_category(merged_path, category='sport') filter_by_rank(path, category='sport', suffix='_new')", "del text # free memory comments.rename(inplace=True, index=str, columns={\"commen t_t ext\": \"comment_text\"}) # the", "== 0.5): # There is a problem when the want to bin the", "chunk[chunk['comment_text'].notnull()] # filter out NaNs comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only select root comments", "train_test_split(df, test_size=0.2) # first train: 0.8 val, test = train_test_split(not_train, test_size=0.5) # then,", "in [0.1, 0.25, 0.5]: co = _split_and_label(co, top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def 
split_train_val_test(category='politics', suffix='_fixed'): perc", "pd.concat(comments_list) logger.info('Storing filtered category comments') comments.to_csv(path_category_comments) return path_category_comments def _enhance_and_filter_comments(co): logger.info('Enhance comments with", "dataset. It would result in 2 duplicates. Most likely due to # rel_upvotes", "perc {p}') outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if not os.path.exists(outdir): os.mkdir(outdir) df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train,", "= logging.getLogger(__name__) def process_tokenized_text(): logger.info('Reading original comments data') comments = pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x:", "= 0 res = pd.concat([res_pos, res_neg]) assert(res[res['class'] == 0].shape[0] == res[res['class'] == 1].shape[0])", "[0.1, 0.25, 0.5]: co = _split_and_label(co, top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def split_train_val_test(category='politics', suffix='_fixed'): perc =", "for chunk in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize): comment_chunk = chunk[chunk['comment_text'].notnull()] # filter out NaNs comment_chunk", "# do not consider articles with under 10 upvotes co = co[co['total_comments'] ==", "(row.upvotes / row.total_upvotes) * 100, axis=1) return co def _split_and_label(enhanced_comments, top_bot_perc): logger.info(f'Label data", "then, val: 0.1, test: 0.1 train.to_csv(outdir + '/train.csv') val.to_csv(outdir + '/val.csv') test.to_csv(outdir +", "numpy as np from sklearn.model_selection import train_test_split # Input: PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS", "comment_chunk.drop(['parent_comment_id'], axis=1) comments_list.append(comment_chunk) # it's faster to first gather all in a list", "= co[co['total_upvotes'] > 20] # do not consider articles with under 10 upvotes", "into comment data') comments = pd.concat([comments, text], axis=1) del text # free memory", "pd.concat([comments, text], axis=1) del text # free memory comments.rename(inplace=True, index=str, columns={\"commen t_t ext\":", "text into comment data') comments = pd.concat([comments, text], axis=1) del text # free", "def _split_and_label(enhanced_comments, top_bot_perc): logger.info(f'Label data for perc {top_bot_perc}') num_rows = co.shape[0] N =", "pd.read_csv(PATH_ARTICLES) articles = articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" + category + \"/\")] # Read in chunkwise chunksize", "cut. 
res = pd.merge(co, res_pos, how='left') res['class'] = res['class'].fillna(0) else: res_neg['class'] = 0", "path = filter_comments_by_category(merged_path, category='sport') filter_by_rank(path, category='sport', suffix='_new') split_train_val_test(category='sport', suffix='_new') path = filter_comments_by_category(merged_path, category='politics')", "val.to_csv(outdir + '/val.csv') test.to_csv(outdir + '/test.csv') if __name__ == \"__main__\": merged_path = process_tokenized_text()", "articles = pd.read_csv(PATH_ARTICLES) articles = articles[articles['article_url'].str.contains(\"https://www.theguardian.com/\" + category + \"/\")] # Read in", "sklearn.model_selection import train_test_split # Input: PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT =", "df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train, not_train = train_test_split(df, test_size=0.2) # first train: 0.8 val,", "= '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' # Output: PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS = 10 logger = logging.getLogger(__name__)", "sure we don't have duplicates return res def filter_by_rank(path_category_comments, category='politics', suffix='_fixed'): co =", "perc = ['0.1', '0.25', '0.5'] for p in perc: logger.info(f'Create train, val and", "import logging import math import os import pandas as pd import numpy as", "0 res = pd.concat([res_pos, res_neg]) assert(res[res['class'] == 0].shape[0] == res[res['class'] == 1].shape[0]) #", "from sklearn.model_selection import train_test_split # Input: PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv' PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT", "test: 0.1 train.to_csv(outdir + '/train.csv') val.to_csv(outdir + '/val.csv') test.to_csv(outdir + '/test.csv') if __name__", "co['rank'] = groupby['timestamp'].rank(method='dense').astype(int) co = co[co['rank'] <= AMOUNT_OF_RANKS] # select only first 10", "_enhance_and_filter_comments(co) for top_bot_perc in [0.1, 0.25, 0.5]: co = _split_and_label(co, top_bot_perc) co.to_csv(f\"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv\") def", "comments = pd.concat(comments_list) logger.info('Storing filtered category comments') comments.to_csv(path_category_comments) return path_category_comments def _enhance_and_filter_comments(co): logger.info('Enhance", "= comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter out article in category comment_chunk = comment_chunk.drop(['parent_comment_id'], axis=1) comments_list.append(comment_chunk)", "co.apply(lambda row: (row.upvotes / row.total_upvotes) * 100, axis=1) return co def _split_and_label(enhanced_comments, top_bot_perc):", "PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS = 10 logger = logging.getLogger(__name__) def process_tokenized_text(): logger.info('Reading original", "duplicates return res def filter_by_rank(path_category_comments, category='politics', suffix='_fixed'): co = pd.read_csv(path_category_comments) co = _enhance_and_filter_comments(co)", "pandas as pd import numpy as np from sklearn.model_selection import train_test_split # Input:", "co = co[co['rank'] <= AMOUNT_OF_RANKS] # select only first 10 comments 
co['total_upvotes'] =", "file has a strange col name comments.to_csv(PATH_MERGED_COMMENTS) def filter_comments_by_category(category='sport'): # \"politics\" # Output:", "'/mnt/data/datasets/newspapers/guardian/comments.csv' PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower' # Output: PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv' AMOUNT_OF_RANKS = 10 logger", "bin the *whole* dataset. It would result in 2 duplicates. Most likely due", "filtering') groupby = co.groupby('article_id') co['rank'] = groupby['timestamp'].rank(method='dense').astype(int) co = co[co['rank'] <= AMOUNT_OF_RANKS] #", "# the tokenized file has a strange col name comments.to_csv(PATH_MERGED_COMMENTS) def filter_comments_by_category(category='sport'): #", "'/train.csv') val.to_csv(outdir + '/val.csv') test.to_csv(outdir + '/test.csv') if __name__ == \"__main__\": merged_path =", "= res['class'].fillna(0) else: res_neg['class'] = 0 res = pd.concat([res_pos, res_neg]) assert(res[res['class'] == 0].shape[0]", "\"__main__\": merged_path = process_tokenized_text() path = filter_comments_by_category(merged_path, category='sport') filter_by_rank(path, category='sport', suffix='_new') split_train_val_test(category='sport', suffix='_new')", "logger.info('Reading merged comments chunkwise') for chunk in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize): comment_chunk = chunk[chunk['comment_text'].notnull()] #", "{category} and perc {p}') outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if not os.path.exists(outdir): os.mkdir(outdir) df =", "num_rows = co.shape[0] N = int((num_rows * top_bot_perc) / AMOUNT_OF_RANKS) groupby = co.groupby(\"rank\",", "comments.shape[0] == text.shape[0] logger.info('Merging tokenized text into comment data') comments = pd.concat([comments, text],", "x != 'comment_text') logger.info('Reading tokenized text') text = pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False) assert comments.shape[0] ==", "over under 10 comments. 
co['rel_upvotes'] = co.apply(lambda row: (row.upvotes / row.total_upvotes) * 100,", "Most likely due to # rel_upvotes is the same for multiple values and", "pd import numpy as np from sklearn.model_selection import train_test_split # Input: PATH_ARTICLES =", "assert(res[res['class'] == 0].shape[0] == res[res['class'] == 1].shape[0]) # ensure same number of classes", "category='sport', suffix='_new') split_train_val_test(category='sport', suffix='_new') path = filter_comments_by_category(merged_path, category='politics') filter_by_rank(path, category='politics', suffix='_new') split_train_val_test(category='politics', suffix='_new')", "to first gather all in a list and concat once comments = pd.concat(comments_list)", "groupby.apply(lambda g: g.nlargest(N, \"rel_upvotes\", keep=\"last\")) res_pos['class'] = 1 res_neg = groupby.apply(lambda g: g.nsmallest(N,", "if not os.path.exists(outdir): os.mkdir(outdir) df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train, not_train = train_test_split(df, test_size=0.2) #", "gather all in a list and concat once comments = pd.concat(comments_list) logger.info('Storing filtered", "test set for category {category} and perc {p}') outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if not", "1 res_neg = groupby.apply(lambda g: g.nsmallest(N, \"rel_upvotes\", keep=\"first\")) if (top_bot_perc == 0.5): #", "def split_train_val_test(category='politics', suffix='_fixed'): perc = ['0.1', '0.25', '0.5'] for p in perc: logger.info(f'Create", "and it's not possible to have a clear cut. res = pd.merge(co, res_pos,", "text # free memory comments.rename(inplace=True, index=str, columns={\"commen t_t ext\": \"comment_text\"}) # the tokenized", "res_neg]) assert(res[res['class'] == 0].shape[0] == res[res['class'] == 1].shape[0]) # ensure same number of", "= pd.concat([comments, text], axis=1) del text # free memory comments.rename(inplace=True, index=str, columns={\"commen t_t", "co = co[co['total_upvotes'] > 20] # do not consider articles with under 10", "# filter out article in category comment_chunk = comment_chunk.drop(['parent_comment_id'], axis=1) comments_list.append(comment_chunk) # it's", "list and concat once comments = pd.concat(comments_list) logger.info('Storing filtered category comments') comments.to_csv(path_category_comments) return", "= pd.concat([res_pos, res_neg]) assert(res[res['class'] == 0].shape[0] == res[res['class'] == 1].shape[0]) # ensure same", "keep=\"last\")) res_pos['class'] = 1 res_neg = groupby.apply(lambda g: g.nsmallest(N, \"rel_upvotes\", keep=\"first\")) if (top_bot_perc", "p in perc: logger.info(f'Create train, val and test set for category {category} and", "10 ** 6 comments_list = [] logger.info('Reading merged comments chunkwise') for chunk in", "groupby['upvotes'].transform('count') co = co[co['total_upvotes'] > 20] # do not consider articles with under", "rel_upvotes is the same for multiple values and it's not possible to have", "# Read in chunkwise chunksize = 10 ** 6 comments_list = [] logger.info('Reading", "the want to bin the *whole* dataset. 
It would result in 2 duplicates.", "as pd import numpy as np from sklearn.model_selection import train_test_split # Input: PATH_ARTICLES", "g.nsmallest(N, \"rel_upvotes\", keep=\"first\")) if (top_bot_perc == 0.5): # There is a problem when", "number of classes assert(res['comment_id'].is_unique) # make sure we don't have duplicates return res", "0.8 val, test = train_test_split(not_train, test_size=0.5) # then, val: 0.1, test: 0.1 train.to_csv(outdir", "logger.info('Storing filtered category comments') comments.to_csv(path_category_comments) return path_category_comments def _enhance_and_filter_comments(co): logger.info('Enhance comments with rank", "outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if not os.path.exists(outdir): os.mkdir(outdir) df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train, not_train =", "col name comments.to_csv(PATH_MERGED_COMMENTS) def filter_comments_by_category(category='sport'): # \"politics\" # Output: path_category_comments = f\"/mnt/data/group07/johannes/proc_data/{category}_comments.csv\" logger.info('Collecting", "2 duplicates. Most likely due to # rel_upvotes is the same for multiple", "one import logging import math import os import pandas as pd import numpy", "= co[co['total_comments'] == 10] # remove articles with over under 10 comments. co['rel_upvotes']", "comments_list.append(comment_chunk) # it's faster to first gather all in a list and concat", "tokenized text') text = pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False) assert comments.shape[0] == text.shape[0] logger.info('Merging tokenized text", "path_category_comments def _enhance_and_filter_comments(co): logger.info('Enhance comments with rank and apply filtering') groupby = co.groupby('article_id')", "*whole* dataset. It would result in 2 duplicates. Most likely due to #", "!= 'comment_text') logger.info('Reading tokenized text') text = pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False) assert comments.shape[0] == text.shape[0]", "+ '/train.csv') val.to_csv(outdir + '/val.csv') test.to_csv(outdir + '/test.csv') if __name__ == \"__main__\": merged_path", "comments chunkwise') for chunk in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize): comment_chunk = chunk[chunk['comment_text'].notnull()] # filter out", "10 upvotes co = co[co['total_comments'] == 10] # remove articles with over under", "remove articles with over under 10 comments. co['rel_upvotes'] = co.apply(lambda row: (row.upvotes /", "don't have duplicates return res def filter_by_rank(path_category_comments, category='politics', suffix='_fixed'): co = pd.read_csv(path_category_comments) co", "{p}') outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}' if not os.path.exists(outdir): os.mkdir(outdir) df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv') train, not_train" ]
[ "''' 21cm 전파를 보내는 행성상성운과 우리 은하 사이 상대적 속도를 계산합니다. 21cm 전파의", "- lamda_0/lambda_0)*c ''' from astropy import constants as const import math lambda_obsereved =", "lamda_0/lambda_0)*c ''' from astropy import constants as const import math lambda_obsereved = float(input(\"Enter", "def func_radial_velocity(lambda_obsereved, lambda_0, c): print( ((lambda_obsereved - lambda_0)/ lambda_0 )*c ) func_radial_velocity(lambda_obsereved, lambda_0,", "print(\" \\n V_r = \" , end='', flush = True) func_V_r() def func_radial_velocity(lambda_obsereved,", "같이 주어집니다. V_r = z * c = (lambda_obsereved - lamda_0/lambda_0)*c ''' from", "lambda_0 = 5006.84 c = const.c.to('km/s') def func_V_r(): print(\" \\n V_r = \"", "때 시선속도 V_r 은 다음과 같이 주어집니다. V_r = z * c =", "wavelength (lambda_obsereved): \")) lambda_0 = 5006.84 c = const.c.to('km/s') def func_V_r(): print(\" \\n", "z * c = (lambda_obsereved - lamda_0/lambda_0)*c ''' from astropy import constants as", "\\n V_r = \" , end='', flush = True) func_V_r() def func_radial_velocity(lambda_obsereved, lambda_0,", "= \" , end='', flush = True) func_V_r() def func_radial_velocity(lambda_obsereved, lambda_0, c): print(", "계산합니다. 21cm 전파의 lamda_0 는 5006.84 A 이고,관측된 파장 lamda 와 적색편이 z가", "lamda_0 는 5006.84 A 이고,관측된 파장 lamda 와 적색편이 z가 주어졌을 때 시선속도", "float(input(\"Enter the obeserved wavelength (lambda_obsereved): \")) lambda_0 = 5006.84 c = const.c.to('km/s') def", "상대적 속도를 계산합니다. 21cm 전파의 lamda_0 는 5006.84 A 이고,관측된 파장 lamda 와", "속도를 계산합니다. 21cm 전파의 lamda_0 는 5006.84 A 이고,관측된 파장 lamda 와 적색편이", "the obeserved wavelength (lambda_obsereved): \")) lambda_0 = 5006.84 c = const.c.to('km/s') def func_V_r():", "= const.c.to('km/s') def func_V_r(): print(\" \\n V_r = \" , end='', flush =", "21cm 전파를 보내는 행성상성운과 우리 은하 사이 상대적 속도를 계산합니다. 21cm 전파의 lamda_0", "''' from astropy import constants as const import math lambda_obsereved = float(input(\"Enter the", "은 다음과 같이 주어집니다. V_r = z * c = (lambda_obsereved - lamda_0/lambda_0)*c", "def func_V_r(): print(\" \\n V_r = \" , end='', flush = True) func_V_r()", "calculator.py ''' 21cm 전파를 보내는 행성상성운과 우리 은하 사이 상대적 속도를 계산합니다. 21cm", "func_V_r(): print(\" \\n V_r = \" , end='', flush = True) func_V_r() def", "import math lambda_obsereved = float(input(\"Enter the obeserved wavelength (lambda_obsereved): \")) lambda_0 = 5006.84", "c = const.c.to('km/s') def func_V_r(): print(\" \\n V_r = \" , end='', flush", "주어졌을 때 시선속도 V_r 은 다음과 같이 주어집니다. V_r = z * c", "end='', flush = True) func_V_r() def func_radial_velocity(lambda_obsereved, lambda_0, c): print( ((lambda_obsereved - lambda_0)/", "V_r 은 다음과 같이 주어집니다. V_r = z * c = (lambda_obsereved -", "V_r = \" , end='', flush = True) func_V_r() def func_radial_velocity(lambda_obsereved, lambda_0, c):", "= True) func_V_r() def func_radial_velocity(lambda_obsereved, lambda_0, c): print( ((lambda_obsereved - lambda_0)/ lambda_0 )*c", "math lambda_obsereved = float(input(\"Enter the obeserved wavelength (lambda_obsereved): \")) lambda_0 = 5006.84 c", "적색편이 z가 주어졌을 때 시선속도 V_r 은 다음과 같이 주어집니다. V_r = z", "\")) lambda_0 = 5006.84 c = const.c.to('km/s') def func_V_r(): print(\" \\n V_r =", "5006.84 A 이고,관측된 파장 lamda 와 적색편이 z가 주어졌을 때 시선속도 V_r 은", "= float(input(\"Enter the obeserved wavelength (lambda_obsereved): \")) lambda_0 = 5006.84 c = const.c.to('km/s')", "lamda 와 적색편이 z가 주어졌을 때 시선속도 V_r 은 다음과 같이 주어집니다. 
V_r", "21cm 전파의 lamda_0 는 5006.84 A 이고,관측된 파장 lamda 와 적색편이 z가 주어졌을", "= 5006.84 c = const.c.to('km/s') def func_V_r(): print(\" \\n V_r = \" ,", "True) func_V_r() def func_radial_velocity(lambda_obsereved, lambda_0, c): print( ((lambda_obsereved - lambda_0)/ lambda_0 )*c )", "는 5006.84 A 이고,관측된 파장 lamda 와 적색편이 z가 주어졌을 때 시선속도 V_r", "z가 주어졌을 때 시선속도 V_r 은 다음과 같이 주어집니다. V_r = z *", "velocity calculator.py ''' 21cm 전파를 보내는 행성상성운과 우리 은하 사이 상대적 속도를 계산합니다.", "와 적색편이 z가 주어졌을 때 시선속도 V_r 은 다음과 같이 주어집니다. V_r =", "lambda_obsereved = float(input(\"Enter the obeserved wavelength (lambda_obsereved): \")) lambda_0 = 5006.84 c =", "다음과 같이 주어집니다. V_r = z * c = (lambda_obsereved - lamda_0/lambda_0)*c '''", "은하 사이 상대적 속도를 계산합니다. 21cm 전파의 lamda_0 는 5006.84 A 이고,관측된 파장", "V_r = z * c = (lambda_obsereved - lamda_0/lambda_0)*c ''' from astropy import", "as const import math lambda_obsereved = float(input(\"Enter the obeserved wavelength (lambda_obsereved): \")) lambda_0", "<filename>planetary_nebula_calculator/radial velocity calculator.py ''' 21cm 전파를 보내는 행성상성운과 우리 은하 사이 상대적 속도를", "시선속도 V_r 은 다음과 같이 주어집니다. V_r = z * c = (lambda_obsereved", "import constants as const import math lambda_obsereved = float(input(\"Enter the obeserved wavelength (lambda_obsereved):", "5006.84 c = const.c.to('km/s') def func_V_r(): print(\" \\n V_r = \" , end='',", "func_radial_velocity(lambda_obsereved, lambda_0, c): print( ((lambda_obsereved - lambda_0)/ lambda_0 )*c ) func_radial_velocity(lambda_obsereved, lambda_0, c)", "= z * c = (lambda_obsereved - lamda_0/lambda_0)*c ''' from astropy import constants", "(lambda_obsereved - lamda_0/lambda_0)*c ''' from astropy import constants as const import math lambda_obsereved", "이고,관측된 파장 lamda 와 적색편이 z가 주어졌을 때 시선속도 V_r 은 다음과 같이", "우리 은하 사이 상대적 속도를 계산합니다. 21cm 전파의 lamda_0 는 5006.84 A 이고,관측된", "astropy import constants as const import math lambda_obsereved = float(input(\"Enter the obeserved wavelength", "파장 lamda 와 적색편이 z가 주어졌을 때 시선속도 V_r 은 다음과 같이 주어집니다.", "constants as const import math lambda_obsereved = float(input(\"Enter the obeserved wavelength (lambda_obsereved): \"))", "c = (lambda_obsereved - lamda_0/lambda_0)*c ''' from astropy import constants as const import", "obeserved wavelength (lambda_obsereved): \")) lambda_0 = 5006.84 c = const.c.to('km/s') def func_V_r(): print(\"", "* c = (lambda_obsereved - lamda_0/lambda_0)*c ''' from astropy import constants as const", "= (lambda_obsereved - lamda_0/lambda_0)*c ''' from astropy import constants as const import math", "행성상성운과 우리 은하 사이 상대적 속도를 계산합니다. 21cm 전파의 lamda_0 는 5006.84 A", "(lambda_obsereved): \")) lambda_0 = 5006.84 c = const.c.to('km/s') def func_V_r(): print(\" \\n V_r", "const.c.to('km/s') def func_V_r(): print(\" \\n V_r = \" , end='', flush = True)", "\" , end='', flush = True) func_V_r() def func_radial_velocity(lambda_obsereved, lambda_0, c): print( ((lambda_obsereved", "사이 상대적 속도를 계산합니다. 21cm 전파의 lamda_0 는 5006.84 A 이고,관측된 파장 lamda", "전파를 보내는 행성상성운과 우리 은하 사이 상대적 속도를 계산합니다. 21cm 전파의 lamda_0 는", "const import math lambda_obsereved = float(input(\"Enter the obeserved wavelength (lambda_obsereved): \")) lambda_0 =", "보내는 행성상성운과 우리 은하 사이 상대적 속도를 계산합니다. 
21cm 전파의 lamda_0 는 5006.84", "A 이고,관측된 파장 lamda 와 적색편이 z가 주어졌을 때 시선속도 V_r 은 다음과", ", end='', flush = True) func_V_r() def func_radial_velocity(lambda_obsereved, lambda_0, c): print( ((lambda_obsereved -", "from astropy import constants as const import math lambda_obsereved = float(input(\"Enter the obeserved", "func_V_r() def func_radial_velocity(lambda_obsereved, lambda_0, c): print( ((lambda_obsereved - lambda_0)/ lambda_0 )*c ) func_radial_velocity(lambda_obsereved,", "주어집니다. V_r = z * c = (lambda_obsereved - lamda_0/lambda_0)*c ''' from astropy", "flush = True) func_V_r() def func_radial_velocity(lambda_obsereved, lambda_0, c): print( ((lambda_obsereved - lambda_0)/ lambda_0", "전파의 lamda_0 는 5006.84 A 이고,관측된 파장 lamda 와 적색편이 z가 주어졌을 때" ]
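A minimal non-interactive usage sketch of the same formula; the observed wavelength 5008.24 A is made up for illustration, everything else matches the script above.

from astropy import constants as const

lambda_0 = 5006.84         # rest wavelength in Angstrom
lambda_observed = 5008.24  # hypothetical observed wavelength in Angstrom
c = const.c.to('km/s')

# positive V_r means the source is receding from us
v_r = ((lambda_observed - lambda_0) / lambda_0) * c
print(v_r)  # roughly 83.8 km / s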
[ "Engineer & Software developer <EMAIL> Created on 27 December, 2017 @ 12:40 AM.", "December, 2017 @ 12:40 AM. Copyright © 2017. Victor. All rights reserved. \"\"\"", "12:40 AM. Copyright © 2017. Victor. All rights reserved. \"\"\" import os APP_NAME", "\"\"\" import os APP_NAME = 'folktales' PROJECT_DIR = os.getcwd() STATIC_DIR = os.path.join(PROJECT_DIR, 'static')", "Victor. All rights reserved. \"\"\" import os APP_NAME = 'folktales' PROJECT_DIR = os.getcwd()", "reserved. \"\"\" import os APP_NAME = 'folktales' PROJECT_DIR = os.getcwd() STATIC_DIR = os.path.join(PROJECT_DIR,", "APP_NAME = 'folktales' PROJECT_DIR = os.getcwd() STATIC_DIR = os.path.join(PROJECT_DIR, 'static') DATASET_DIR = os.path.join(STATIC_DIR,", "os APP_NAME = 'folktales' PROJECT_DIR = os.getcwd() STATIC_DIR = os.path.join(PROJECT_DIR, 'static') DATASET_DIR =", "rights reserved. \"\"\" import os APP_NAME = 'folktales' PROJECT_DIR = os.getcwd() STATIC_DIR =", "developer <EMAIL> Created on 27 December, 2017 @ 12:40 AM. Copyright © 2017.", "2017 @ 12:40 AM. Copyright © 2017. Victor. All rights reserved. \"\"\" import", "= 'folktales' PROJECT_DIR = os.getcwd() STATIC_DIR = os.path.join(PROJECT_DIR, 'static') DATASET_DIR = os.path.join(STATIC_DIR, 'datasets')", "\"\"\" @author <NAME> A.I. Engineer & Software developer <EMAIL> Created on 27 December,", "Created on 27 December, 2017 @ 12:40 AM. Copyright © 2017. Victor. All", "@ 12:40 AM. Copyright © 2017. Victor. All rights reserved. \"\"\" import os", "@author <NAME> A.I. Engineer & Software developer <EMAIL> Created on 27 December, 2017", "© 2017. Victor. All rights reserved. \"\"\" import os APP_NAME = 'folktales' PROJECT_DIR", "on 27 December, 2017 @ 12:40 AM. Copyright © 2017. Victor. All rights", "Copyright © 2017. Victor. All rights reserved. \"\"\" import os APP_NAME = 'folktales'", "<EMAIL> Created on 27 December, 2017 @ 12:40 AM. Copyright © 2017. Victor.", "2017. Victor. All rights reserved. \"\"\" import os APP_NAME = 'folktales' PROJECT_DIR =", "Software developer <EMAIL> Created on 27 December, 2017 @ 12:40 AM. Copyright ©", "<NAME> A.I. Engineer & Software developer <EMAIL> Created on 27 December, 2017 @", "& Software developer <EMAIL> Created on 27 December, 2017 @ 12:40 AM. Copyright", "import os APP_NAME = 'folktales' PROJECT_DIR = os.getcwd() STATIC_DIR = os.path.join(PROJECT_DIR, 'static') DATASET_DIR", "All rights reserved. \"\"\" import os APP_NAME = 'folktales' PROJECT_DIR = os.getcwd() STATIC_DIR", "A.I. Engineer & Software developer <EMAIL> Created on 27 December, 2017 @ 12:40", "AM. Copyright © 2017. Victor. All rights reserved. \"\"\" import os APP_NAME =", "27 December, 2017 @ 12:40 AM. Copyright © 2017. Victor. All rights reserved." ]
[ "= 'data/raw' PROCESSED_DATA_PATH = 'data/processed' OUTPUT_DATA_PATH = 'data/output' MODELS_PATH = 'models' REPORTS_PATH =", "'data/raw' PROCESSED_DATA_PATH = 'data/processed' OUTPUT_DATA_PATH = 'data/output' MODELS_PATH = 'models' REPORTS_PATH = 'reports'", "RAW_DATA_PATH = 'data/raw' PROCESSED_DATA_PATH = 'data/processed' OUTPUT_DATA_PATH = 'data/output' MODELS_PATH = 'models' REPORTS_PATH" ]
[ "PodcastCategory() first_podcast_category.title = 'PodCastCategory1' first_podcast_category.save() second_podcast_category = PodcastCategory() second_podcast_category.title = 'PodCastCategory2' second_podcast_category.save() saved_podcast_category", "= PodcastCategory() second_podcast_category.title = 'PodCastCategory2' second_podcast_category.save() saved_podcast_category = PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(), 2) first_saved_podcast_category =", "test_saving_and_retrieving_PodcastCategory(self): first_podcast_category = PodcastCategory() first_podcast_category.title = 'PodCastCategory1' first_podcast_category.save() second_podcast_category = PodcastCategory() second_podcast_category.title =", "class PodcastCategoryModelTest(TestCase): def test_saving_and_retrieving_PodcastCategory(self): first_podcast_category = PodcastCategory() first_podcast_category.title = 'PodCastCategory1' first_podcast_category.save() second_podcast_category =", "= VideocastCategory() second_videocast_category.title = 'VideoCastCategory2' second_videocast_category.save() saved_videocast_category = VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(), 2) first_saved_videocast_category =", "= PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(), 2) first_saved_podcast_category = saved_podcast_category[0] second_saved_podcast_category = saved_podcast_category[1] self.assertEqual(first_saved_podcast_category.title, 'PodCastCategory1') self.assertEqual(second_saved_podcast_category.title,", "first_videocast_category.title = 'VideoCastCategory1' first_videocast_category.save() second_videocast_category = VideocastCategory() second_videocast_category.title = 'VideoCastCategory2' second_videocast_category.save() saved_videocast_category =", "= BlogCategory() first_category.title = 'Category1' first_category.save() second_category = BlogCategory() second_category.title = 'Category2' second_category.save()", "first_saved_category = saved_category[0] second_saved_category = saved_category[1] self.assertEqual(first_saved_category.title, 'Category1') self.assertEqual(second_saved_category.title, 'Category2') class VideocastCategoryModelTest(TestCase): def", "import static from django.test import TestCase import environ import os class BlogCategoryModelTest(TestCase): def", "test_saving_and_retrieving_BlogCategory(self): first_category = BlogCategory() first_category.title = 'Category1' first_category.save() second_category = BlogCategory() second_category.title =", "def test_saving_and_retrieving_VideocastCategory(self): first_videocast_category = VideocastCategory() first_videocast_category.title = 'VideoCastCategory1' first_videocast_category.save() second_videocast_category = VideocastCategory() second_videocast_category.title", "first_category.title = 'Category1' first_category.save() second_category = BlogCategory() second_category.title = 'Category2' second_category.save() saved_category =", "first_podcast_category = PodcastCategory() first_podcast_category.title = 'PodCastCategory1' first_podcast_category.save() second_podcast_category = PodcastCategory() second_podcast_category.title = 'PodCastCategory2'", "VideocastCategory() second_videocast_category.title = 'VideoCastCategory2' second_videocast_category.save() saved_videocast_category = VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(), 2) first_saved_videocast_category = saved_videocast_category[0]", "second_podcast_category = 
PodcastCategory() second_podcast_category.title = 'PodCastCategory2' second_podcast_category.save() saved_podcast_category = PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(), 2) first_saved_podcast_category", "= VideocastCategory() first_videocast_category.title = 'VideoCastCategory1' first_videocast_category.save() second_videocast_category = VideocastCategory() second_videocast_category.title = 'VideoCastCategory2' second_videocast_category.save()", "second_saved_category = saved_category[1] self.assertEqual(first_saved_category.title, 'Category1') self.assertEqual(second_saved_category.title, 'Category2') class VideocastCategoryModelTest(TestCase): def test_saving_and_retrieving_VideocastCategory(self): first_videocast_category =", "self.assertEqual(second_saved_category.title, 'Category2') class VideocastCategoryModelTest(TestCase): def test_saving_and_retrieving_VideocastCategory(self): first_videocast_category = VideocastCategory() first_videocast_category.title = 'VideoCastCategory1' first_videocast_category.save()", "VideocastCategoryModelTest(TestCase): def test_saving_and_retrieving_VideocastCategory(self): first_videocast_category = VideocastCategory() first_videocast_category.title = 'VideoCastCategory1' first_videocast_category.save() second_videocast_category = VideocastCategory()", "first_videocast_category.save() second_videocast_category = VideocastCategory() second_videocast_category.title = 'VideoCastCategory2' second_videocast_category.save() saved_videocast_category = VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(), 2)", "= saved_videocast_category[0] second_saved_videocast_category = saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2') class PodcastCategoryModelTest(TestCase): def test_saving_and_retrieving_PodcastCategory(self):", "SimpleUploadedFile from django.templatetags.static import static from django.test import TestCase import environ import os", "second_videocast_category = VideocastCategory() second_videocast_category.title = 'VideoCastCategory2' second_videocast_category.save() saved_videocast_category = VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(), 2) first_saved_videocast_category", "= 'VideoCastCategory1' first_videocast_category.save() second_videocast_category = VideocastCategory() second_videocast_category.title = 'VideoCastCategory2' second_videocast_category.save() saved_videocast_category = VideocastCategory.objects.all()", "import TestCase import environ import os class BlogCategoryModelTest(TestCase): def test_saving_and_retrieving_BlogCategory(self): first_category = BlogCategory()", "django.core.files.uploadedfile import SimpleUploadedFile from django.templatetags.static import static from django.test import TestCase import environ", "import environ import os class BlogCategoryModelTest(TestCase): def test_saving_and_retrieving_BlogCategory(self): first_category = BlogCategory() first_category.title =", "static from django.test import TestCase import environ import os class BlogCategoryModelTest(TestCase): def test_saving_and_retrieving_BlogCategory(self):", "environ import os class BlogCategoryModelTest(TestCase): def test_saving_and_retrieving_BlogCategory(self): first_category = BlogCategory() first_category.title = 'Category1'", "PodcastCategory() second_podcast_category.title = 'PodCastCategory2' 
second_podcast_category.save() saved_podcast_category = PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(), 2) first_saved_podcast_category = saved_podcast_category[0]", "= 'Category1' first_category.save() second_category = BlogCategory() second_category.title = 'Category2' second_category.save() saved_category = BlogCategory.objects.all()", "second_podcast_category.title = 'PodCastCategory2' second_podcast_category.save() saved_podcast_category = PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(), 2) first_saved_podcast_category = saved_podcast_category[0] second_saved_podcast_category", "import SimpleUploadedFile from django.templatetags.static import static from django.test import TestCase import environ import", "django.contrib.auth.models import User from django.core.files.uploadedfile import SimpleUploadedFile from django.templatetags.static import static from django.test", "test_saving_and_retrieving_VideocastCategory(self): first_videocast_category = VideocastCategory() first_videocast_category.title = 'VideoCastCategory1' first_videocast_category.save() second_videocast_category = VideocastCategory() second_videocast_category.title =", "= PodcastCategory() first_podcast_category.title = 'PodCastCategory1' first_podcast_category.save() second_podcast_category = PodcastCategory() second_podcast_category.title = 'PodCastCategory2' second_podcast_category.save()", "saved_category[1] self.assertEqual(first_saved_category.title, 'Category1') self.assertEqual(second_saved_category.title, 'Category2') class VideocastCategoryModelTest(TestCase): def test_saving_and_retrieving_VideocastCategory(self): first_videocast_category = VideocastCategory() first_videocast_category.title", "Index from content.models import BlogCategory, VideocastCategory, PodcastCategory, Blog from django.contrib.auth.models import User from", "2) first_saved_category = saved_category[0] second_saved_category = saved_category[1] self.assertEqual(first_saved_category.title, 'Category1') self.assertEqual(second_saved_category.title, 'Category2') class VideocastCategoryModelTest(TestCase):", "second_category = BlogCategory() second_category.title = 'Category2' second_category.save() saved_category = BlogCategory.objects.all() self.assertEqual(saved_category.count(), 2) first_saved_category", "first_podcast_category.title = 'PodCastCategory1' first_podcast_category.save() second_podcast_category = PodcastCategory() second_podcast_category.title = 'PodCastCategory2' second_podcast_category.save() saved_podcast_category =", "django.test import TestCase import environ import os class BlogCategoryModelTest(TestCase): def test_saving_and_retrieving_BlogCategory(self): first_category =", "= BlogCategory() second_category.title = 'Category2' second_category.save() saved_category = BlogCategory.objects.all() self.assertEqual(saved_category.count(), 2) first_saved_category =", "second_saved_videocast_category = saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2') class PodcastCategoryModelTest(TestCase): def test_saving_and_retrieving_PodcastCategory(self): first_podcast_category =", "BlogCategory.objects.all() self.assertEqual(saved_category.count(), 2) first_saved_category = saved_category[0] second_saved_category = saved_category[1] self.assertEqual(first_saved_category.title, 'Category1') self.assertEqual(second_saved_category.title, 
'Category2')", "import Index from content.models import BlogCategory, VideocastCategory, PodcastCategory, Blog from django.contrib.auth.models import User", "VideocastCategory() first_videocast_category.title = 'VideoCastCategory1' first_videocast_category.save() second_videocast_category = VideocastCategory() second_videocast_category.title = 'VideoCastCategory2' second_videocast_category.save() saved_videocast_category", "'PodCastCategory1' first_podcast_category.save() second_podcast_category = PodcastCategory() second_podcast_category.title = 'PodCastCategory2' second_podcast_category.save() saved_podcast_category = PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(),", "second_category.save() saved_category = BlogCategory.objects.all() self.assertEqual(saved_category.count(), 2) first_saved_category = saved_category[0] second_saved_category = saved_category[1] self.assertEqual(first_saved_category.title,", "django.templatetags.static import static from django.test import TestCase import environ import os class BlogCategoryModelTest(TestCase):", "self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2') class PodcastCategoryModelTest(TestCase): def test_saving_and_retrieving_PodcastCategory(self): first_podcast_category = PodcastCategory() first_podcast_category.title =", "first_category.save() second_category = BlogCategory() second_category.title = 'Category2' second_category.save() saved_category = BlogCategory.objects.all() self.assertEqual(saved_category.count(), 2)", "self.assertEqual(saved_category.count(), 2) first_saved_category = saved_category[0] second_saved_category = saved_category[1] self.assertEqual(first_saved_category.title, 'Category1') self.assertEqual(second_saved_category.title, 'Category2') class", "first_category = BlogCategory() first_category.title = 'Category1' first_category.save() second_category = BlogCategory() second_category.title = 'Category2'", "import os class BlogCategoryModelTest(TestCase): def test_saving_and_retrieving_BlogCategory(self): first_category = BlogCategory() first_category.title = 'Category1' first_category.save()", "TestCase import environ import os class BlogCategoryModelTest(TestCase): def test_saving_and_retrieving_BlogCategory(self): first_category = BlogCategory() first_category.title", "import BlogCategory, VideocastCategory, PodcastCategory, Blog from django.contrib.auth.models import User from django.core.files.uploadedfile import SimpleUploadedFile", "User from django.core.files.uploadedfile import SimpleUploadedFile from django.templatetags.static import static from django.test import TestCase", "def test_saving_and_retrieving_BlogCategory(self): first_category = BlogCategory() first_category.title = 'Category1' first_category.save() second_category = BlogCategory() second_category.title", "= 'VideoCastCategory2' second_videocast_category.save() saved_videocast_category = VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(), 2) first_saved_videocast_category = saved_videocast_category[0] second_saved_videocast_category =", "saved_category = BlogCategory.objects.all() self.assertEqual(saved_category.count(), 2) first_saved_category = saved_category[0] second_saved_category = saved_category[1] self.assertEqual(first_saved_category.title, 'Category1')", "VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(), 2) first_saved_videocast_category = 
saved_videocast_category[0] second_saved_videocast_category = saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2')", "os class BlogCategoryModelTest(TestCase): def test_saving_and_retrieving_BlogCategory(self): first_category = BlogCategory() first_category.title = 'Category1' first_category.save() second_category", "first_saved_videocast_category = saved_videocast_category[0] second_saved_videocast_category = saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2') class PodcastCategoryModelTest(TestCase): def", "first_podcast_category.save() second_podcast_category = PodcastCategory() second_podcast_category.title = 'PodCastCategory2' second_podcast_category.save() saved_podcast_category = PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(), 2)", "'Category1' first_category.save() second_category = BlogCategory() second_category.title = 'Category2' second_category.save() saved_category = BlogCategory.objects.all() self.assertEqual(saved_category.count(),", "<gh_stars>0 from content.views import Index from content.models import BlogCategory, VideocastCategory, PodcastCategory, Blog from", "self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2') class PodcastCategoryModelTest(TestCase): def test_saving_and_retrieving_PodcastCategory(self): first_podcast_category = PodcastCategory() first_podcast_category.title = 'PodCastCategory1' first_podcast_category.save()", "from django.templatetags.static import static from django.test import TestCase import environ import os class", "PodcastCategory, Blog from django.contrib.auth.models import User from django.core.files.uploadedfile import SimpleUploadedFile from django.templatetags.static import", "BlogCategoryModelTest(TestCase): def test_saving_and_retrieving_BlogCategory(self): first_category = BlogCategory() first_category.title = 'Category1' first_category.save() second_category = BlogCategory()", "= VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(), 2) first_saved_videocast_category = saved_videocast_category[0] second_saved_videocast_category = saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title,", "BlogCategory() second_category.title = 'Category2' second_category.save() saved_category = BlogCategory.objects.all() self.assertEqual(saved_category.count(), 2) first_saved_category = saved_category[0]", "PodcastCategoryModelTest(TestCase): def test_saving_and_retrieving_PodcastCategory(self): first_podcast_category = PodcastCategory() first_podcast_category.title = 'PodCastCategory1' first_podcast_category.save() second_podcast_category = PodcastCategory()", "saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2') class PodcastCategoryModelTest(TestCase): def test_saving_and_retrieving_PodcastCategory(self): first_podcast_category = PodcastCategory() first_podcast_category.title", "from django.test import TestCase import environ import os class BlogCategoryModelTest(TestCase): def test_saving_and_retrieving_BlogCategory(self): first_category", "= 'Category2' second_category.save() 
saved_category = BlogCategory.objects.all() self.assertEqual(saved_category.count(), 2) first_saved_category = saved_category[0] second_saved_category =", "from content.views import Index from content.models import BlogCategory, VideocastCategory, PodcastCategory, Blog from django.contrib.auth.models", "content.views import Index from content.models import BlogCategory, VideocastCategory, PodcastCategory, Blog from django.contrib.auth.models import", "content.models import BlogCategory, VideocastCategory, PodcastCategory, Blog from django.contrib.auth.models import User from django.core.files.uploadedfile import", "'VideoCastCategory2' second_videocast_category.save() saved_videocast_category = VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(), 2) first_saved_videocast_category = saved_videocast_category[0] second_saved_videocast_category = saved_videocast_category[1]", "second_videocast_category.save() saved_videocast_category = VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(), 2) first_saved_videocast_category = saved_videocast_category[0] second_saved_videocast_category = saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title,", "import User from django.core.files.uploadedfile import SimpleUploadedFile from django.templatetags.static import static from django.test import", "second_videocast_category.title = 'VideoCastCategory2' second_videocast_category.save() saved_videocast_category = VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(), 2) first_saved_videocast_category = saved_videocast_category[0] second_saved_videocast_category", "BlogCategory, VideocastCategory, PodcastCategory, Blog from django.contrib.auth.models import User from django.core.files.uploadedfile import SimpleUploadedFile from", "class VideocastCategoryModelTest(TestCase): def test_saving_and_retrieving_VideocastCategory(self): first_videocast_category = VideocastCategory() first_videocast_category.title = 'VideoCastCategory1' first_videocast_category.save() second_videocast_category =", "'Category1') self.assertEqual(second_saved_category.title, 'Category2') class VideocastCategoryModelTest(TestCase): def test_saving_and_retrieving_VideocastCategory(self): first_videocast_category = VideocastCategory() first_videocast_category.title = 'VideoCastCategory1'", "saved_podcast_category = PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(), 2) first_saved_podcast_category = saved_podcast_category[0] second_saved_podcast_category = saved_podcast_category[1] self.assertEqual(first_saved_podcast_category.title, 'PodCastCategory1')", "'VideoCastCategory1' first_videocast_category.save() second_videocast_category = VideocastCategory() second_videocast_category.title = 'VideoCastCategory2' second_videocast_category.save() saved_videocast_category = VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(),", "first_videocast_category = VideocastCategory() first_videocast_category.title = 'VideoCastCategory1' first_videocast_category.save() second_videocast_category = VideocastCategory() second_videocast_category.title = 'VideoCastCategory2'", "'VideoCastCategory2') class PodcastCategoryModelTest(TestCase): def test_saving_and_retrieving_PodcastCategory(self): first_podcast_category = PodcastCategory() first_podcast_category.title = 'PodCastCategory1' first_podcast_category.save() second_podcast_category", "class BlogCategoryModelTest(TestCase): def 
test_saving_and_retrieving_BlogCategory(self): first_category = BlogCategory() first_category.title = 'Category1' first_category.save() second_category =", "second_podcast_category.save() saved_podcast_category = PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(), 2) first_saved_podcast_category = saved_podcast_category[0] second_saved_podcast_category = saved_podcast_category[1] self.assertEqual(first_saved_podcast_category.title,", "second_category.title = 'Category2' second_category.save() saved_category = BlogCategory.objects.all() self.assertEqual(saved_category.count(), 2) first_saved_category = saved_category[0] second_saved_category", "BlogCategory() first_category.title = 'Category1' first_category.save() second_category = BlogCategory() second_category.title = 'Category2' second_category.save() saved_category", "from content.models import BlogCategory, VideocastCategory, PodcastCategory, Blog from django.contrib.auth.models import User from django.core.files.uploadedfile", "= 'PodCastCategory1' first_podcast_category.save() second_podcast_category = PodcastCategory() second_podcast_category.title = 'PodCastCategory2' second_podcast_category.save() saved_podcast_category = PodcastCategory.objects.all()", "VideocastCategory, PodcastCategory, Blog from django.contrib.auth.models import User from django.core.files.uploadedfile import SimpleUploadedFile from django.templatetags.static", "PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(), 2) first_saved_podcast_category = saved_podcast_category[0] second_saved_podcast_category = saved_podcast_category[1] self.assertEqual(first_saved_podcast_category.title, 'PodCastCategory1') self.assertEqual(second_saved_podcast_category.title, 'PodCastCategory2')", "from django.contrib.auth.models import User from django.core.files.uploadedfile import SimpleUploadedFile from django.templatetags.static import static from", "Blog from django.contrib.auth.models import User from django.core.files.uploadedfile import SimpleUploadedFile from django.templatetags.static import static", "saved_category[0] second_saved_category = saved_category[1] self.assertEqual(first_saved_category.title, 'Category1') self.assertEqual(second_saved_category.title, 'Category2') class VideocastCategoryModelTest(TestCase): def test_saving_and_retrieving_VideocastCategory(self): first_videocast_category", "self.assertEqual(first_saved_category.title, 'Category1') self.assertEqual(second_saved_category.title, 'Category2') class VideocastCategoryModelTest(TestCase): def test_saving_and_retrieving_VideocastCategory(self): first_videocast_category = VideocastCategory() first_videocast_category.title =", "= saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2') class PodcastCategoryModelTest(TestCase): def test_saving_and_retrieving_PodcastCategory(self): first_podcast_category = PodcastCategory()", "'Category2') class VideocastCategoryModelTest(TestCase): def test_saving_and_retrieving_VideocastCategory(self): first_videocast_category = VideocastCategory() first_videocast_category.title = 'VideoCastCategory1' first_videocast_category.save() second_videocast_category", "'PodCastCategory2' second_podcast_category.save() saved_podcast_category = PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(), 2) first_saved_podcast_category = saved_podcast_category[0] 
second_saved_podcast_category = saved_podcast_category[1]", "= 'PodCastCategory2' second_podcast_category.save() saved_podcast_category = PodcastCategory.objects.all() self.assertEqual(saved_podcast_category.count(), 2) first_saved_podcast_category = saved_podcast_category[0] second_saved_podcast_category =", "= saved_category[0] second_saved_category = saved_category[1] self.assertEqual(first_saved_category.title, 'Category1') self.assertEqual(second_saved_category.title, 'Category2') class VideocastCategoryModelTest(TestCase): def test_saving_and_retrieving_VideocastCategory(self):", "saved_videocast_category = VideocastCategory.objects.all() self.assertEqual(saved_videocast_category.count(), 2) first_saved_videocast_category = saved_videocast_category[0] second_saved_videocast_category = saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1')", "from django.core.files.uploadedfile import SimpleUploadedFile from django.templatetags.static import static from django.test import TestCase import", "= saved_category[1] self.assertEqual(first_saved_category.title, 'Category1') self.assertEqual(second_saved_category.title, 'Category2') class VideocastCategoryModelTest(TestCase): def test_saving_and_retrieving_VideocastCategory(self): first_videocast_category = VideocastCategory()", "2) first_saved_videocast_category = saved_videocast_category[0] second_saved_videocast_category = saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2') class PodcastCategoryModelTest(TestCase):", "'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2') class PodcastCategoryModelTest(TestCase): def test_saving_and_retrieving_PodcastCategory(self): first_podcast_category = PodcastCategory() first_podcast_category.title = 'PodCastCategory1'", "self.assertEqual(saved_videocast_category.count(), 2) first_saved_videocast_category = saved_videocast_category[0] second_saved_videocast_category = saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2') class", "saved_videocast_category[0] second_saved_videocast_category = saved_videocast_category[1] self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1') self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2') class PodcastCategoryModelTest(TestCase): def test_saving_and_retrieving_PodcastCategory(self): first_podcast_category", "'Category2' second_category.save() saved_category = BlogCategory.objects.all() self.assertEqual(saved_category.count(), 2) first_saved_category = saved_category[0] second_saved_category = saved_category[1]", "= BlogCategory.objects.all() self.assertEqual(saved_category.count(), 2) first_saved_category = saved_category[0] second_saved_category = saved_category[1] self.assertEqual(first_saved_category.title, 'Category1') self.assertEqual(second_saved_category.title,", "def test_saving_and_retrieving_PodcastCategory(self): first_podcast_category = PodcastCategory() first_podcast_category.title = 'PodCastCategory1' first_podcast_category.save() second_podcast_category = PodcastCategory() second_podcast_category.title" ]
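The three tests above only exercise saving a title and reading it back, so they imply models with at least a character `title` field. A minimal sketch of what `content/models.py` would need to satisfy them — the field length, and the absence of other fields, are assumptions, since the real definitions are not part of this dump:

# Hypothetical sketch of content/models.py — only what the tests require.
from django.db import models


class BlogCategory(models.Model):
    title = models.CharField(max_length=255)  # max_length is an assumption


class VideocastCategory(models.Model):
    title = models.CharField(max_length=255)


class PodcastCategory(models.Model):
    title = models.CharField(max_length=255)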
[ "1 mid = (start + end) // 2 while mid != start: if", "if __name__ == '__main__': arr = [2, 4, 6, 8, 10, 12, 14,", "or arr[end] == value: return True return False if __name__ == '__main__': arr", "+ end) // 2 while mid != start: if arr[mid] == value: return", "arr[end] == value: return True return False if __name__ == '__main__': arr =", "elif arr[mid] > value: end = mid else: start = mid mid =", "= mid else: start = mid mid = (start + end) // 2", "True return False if __name__ == '__main__': arr = [2, 4, 6, 8,", "# use when list is sorted def binary_search(arr, value): length = len(arr) start", "arr[mid] > value: end = mid else: start = mid mid = (start", "= mid mid = (start + end) // 2 if arr[start] == value", "is sorted def binary_search(arr, value): length = len(arr) start = 0 end =", "== value: return True return False if __name__ == '__main__': arr = [2,", "== value: return True elif arr[mid] > value: end = mid else: start", "# space compleity- O(1) # use when list is sorted def binary_search(arr, value):", "length = len(arr) start = 0 end = length - 1 mid =", "start: if arr[mid] == value: return True elif arr[mid] > value: end =", "value or arr[end] == value: return True return False if __name__ == '__main__':", "end) // 2 if arr[start] == value or arr[end] == value: return True", "when list is sorted def binary_search(arr, value): length = len(arr) start = 0", "mid != start: if arr[mid] == value: return True elif arr[mid] > value:", "end = mid else: start = mid mid = (start + end) //", "arr = [2, 4, 6, 8, 10, 12, 14, 16, 18] print(binary_search(arr, 16))", "compleity_ O(logn) # space compleity- O(1) # use when list is sorted def", "value: end = mid else: start = mid mid = (start + end)", "== value or arr[end] == value: return True return False if __name__ ==", "while mid != start: if arr[mid] == value: return True elif arr[mid] >", "- 1 mid = (start + end) // 2 while mid != start:", "compleity- O(1) # worst time compleity_ O(logn) # space compleity- O(1) # use", "// 2 while mid != start: if arr[mid] == value: return True elif", "arr[mid] == value: return True elif arr[mid] > value: end = mid else:", "arr[start] == value or arr[end] == value: return True return False if __name__", "binary_search(arr, value): length = len(arr) start = 0 end = length - 1", "mid = (start + end) // 2 if arr[start] == value or arr[end]", "return True elif arr[mid] > value: end = mid else: start = mid", "if arr[mid] == value: return True elif arr[mid] > value: end = mid", "compleity- O(1) # use when list is sorted def binary_search(arr, value): length =", "sorted def binary_search(arr, value): length = len(arr) start = 0 end = length", "// 2 if arr[start] == value or arr[end] == value: return True return", "end = length - 1 mid = (start + end) // 2 while", "0 end = length - 1 mid = (start + end) // 2", "= 0 end = length - 1 mid = (start + end) //", "False if __name__ == '__main__': arr = [2, 4, 6, 8, 10, 12,", "(start + end) // 2 while mid != start: if arr[mid] == value:", "end) // 2 while mid != start: if arr[mid] == value: return True", "best time compleity- O(1) # worst time compleity_ O(logn) # space compleity- O(1)", "+ end) // 2 if arr[start] == value or arr[end] == value: return", "return True return False if __name__ == '__main__': arr = [2, 4, 6,", "= length - 1 mid = (start + end) // 2 while mid", "value: return True elif arr[mid] > value: end = mid else: start =", "mid mid = (start + end) // 2 if arr[start] == value or", "return False if __name__ == '__main__': arr = [2, 4, 6, 8, 10,", "worst 
time compleity_ O(logn) # space compleity- O(1) # use when list is", "= (start + end) // 2 while mid != start: if arr[mid] ==", "O(1) # use when list is sorted def binary_search(arr, value): length = len(arr)", "True elif arr[mid] > value: end = mid else: start = mid mid", "time compleity_ O(logn) # space compleity- O(1) # use when list is sorted", "space compleity- O(1) # use when list is sorted def binary_search(arr, value): length", "time compleity- O(1) # worst time compleity_ O(logn) # space compleity- O(1) #", "= (start + end) // 2 if arr[start] == value or arr[end] ==", "mid else: start = mid mid = (start + end) // 2 if", "'__main__': arr = [2, 4, 6, 8, 10, 12, 14, 16, 18] print(binary_search(arr,", "length - 1 mid = (start + end) // 2 while mid !=", "O(1) # worst time compleity_ O(logn) # space compleity- O(1) # use when", "__name__ == '__main__': arr = [2, 4, 6, 8, 10, 12, 14, 16,", "= len(arr) start = 0 end = length - 1 mid = (start", "start = 0 end = length - 1 mid = (start + end)", "mid = (start + end) // 2 while mid != start: if arr[mid]", "!= start: if arr[mid] == value: return True elif arr[mid] > value: end", "value: return True return False if __name__ == '__main__': arr = [2, 4,", "O(logn) # space compleity- O(1) # use when list is sorted def binary_search(arr,", "== '__main__': arr = [2, 4, 6, 8, 10, 12, 14, 16, 18]", "value): length = len(arr) start = 0 end = length - 1 mid", "2 while mid != start: if arr[mid] == value: return True elif arr[mid]", "list is sorted def binary_search(arr, value): length = len(arr) start = 0 end", "len(arr) start = 0 end = length - 1 mid = (start +", "(start + end) // 2 if arr[start] == value or arr[end] == value:", "# worst time compleity_ O(logn) # space compleity- O(1) # use when list", "if arr[start] == value or arr[end] == value: return True return False if", "start = mid mid = (start + end) // 2 if arr[start] ==", "2 if arr[start] == value or arr[end] == value: return True return False", "else: start = mid mid = (start + end) // 2 if arr[start]", "use when list is sorted def binary_search(arr, value): length = len(arr) start =", "# best time compleity- O(1) # worst time compleity_ O(logn) # space compleity-", "def binary_search(arr, value): length = len(arr) start = 0 end = length -", "> value: end = mid else: start = mid mid = (start +" ]
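On an already-sorted list, the standard library's bisect module performs the same O(log n) membership test without a hand-rolled loop; a minimal equivalent sketch (the function name is mine):

from bisect import bisect_left


def binary_search_bisect(arr, value):
    # bisect_left returns the leftmost insertion point for value;
    # value is present only if that index is in range and holds it.
    i = bisect_left(arr, value)
    return i < len(arr) and arr[i] == value


assert binary_search_bisect([2, 4, 6, 8, 10, 12, 14, 16, 18], 16)
assert not binary_search_bisect([2, 4, 6, 8], 5)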
[ "\"SS12 0AU\", \"SS15 5GX\", \"CM11 2ER\", \"SS13 2EA\", \"CM11 2JX\", \"CM11 2AD\", \"CM12", "LONDON ROAD, BASILDON \"100091212751\", # PROBATION OFFICE, 1 FELMORES, BASILDON \"10013352273\", # 17", "ROAD, BULPHAN, UPMINSTER \"10090682049\", # FERNDALE, TYE COMMON ROAD, BILLERICAY \"100090239089\", # FLAT", "GREENS FARM LANE, BILLERICAY ]: return None if record.addressline6 in [ \"SS12 0AU\",", "\"SS13 3EA\", \"CM11 1HH\", \"SS15 5NZ\", \"CM11 2RU\", \"SS16 5PW\", \"SS13 2LG\", \"SS16", "\"CM12 9JJ\", \"SS15 6GJ\", \"SS15 6PF\", \"SS13 3EA\", \"CM11 1HH\", \"SS15 5NZ\", \"CM11", "BaseXpressDemocracyClubCsvImporter class Command(BaseXpressDemocracyClubCsvImporter): council_id = \"BAI\" addresses_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections", "class Command(BaseXpressDemocracyClubCsvImporter): council_id = \"BAI\" addresses_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections =", "LAINDON, BASILDON \"100090273531\", # 23 GREENS FARM LANE, BILLERICAY ]: return None if", "address_record_to_dict(self, record): uprn = record.property_urn.strip().lstrip(\"0\") if uprn in [ \"100091587330\", # CEDAR COTTAGE,", "TYE COMMON ROAD, BILLERICAY \"100090239089\", # FLAT 1, ST. DAVIDS COURT, LONDON ROAD,", "BULPHAN, UPMINSTER \"10090682049\", # FERNDALE, TYE COMMON ROAD, BILLERICAY \"100090239089\", # FLAT 1,", "ROAD, BASILDON \"100091212751\", # PROBATION OFFICE, 1 FELMORES, BASILDON \"10013352273\", # 17 CHURCH", "0AU\", \"SS15 5GX\", \"CM11 2ER\", \"SS13 2EA\", \"CM11 2JX\", \"CM11 2AD\", \"CM12 9JJ\",", "addresses_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections = [\"2021-05-06\"] csv_delimiter = \",\" def", "\"BAI\" addresses_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections = [\"2021-05-06\"] csv_delimiter = \",\"", "\"CM11 1HH\", \"SS15 5NZ\", \"CM11 2RU\", \"SS16 5PW\", \"SS13 2LG\", \"SS16 6PH\", \"SS12", "# 17 CHURCH ROAD, LAINDON, BASILDON \"100090273531\", # 23 GREENS FARM LANE, BILLERICAY", "2RU\", \"SS16 5PW\", \"SS13 2LG\", \"SS16 6PH\", \"SS12 9LE\", \"SS14 3RZ\", ]: return", "\"10090682049\", # FERNDALE, TYE COMMON ROAD, BILLERICAY \"100090239089\", # FLAT 1, ST. 
DAVIDS", "COTTAGE, LOWER DUNTON ROAD, BULPHAN, UPMINSTER \"10090682049\", # FERNDALE, TYE COMMON ROAD, BILLERICAY", "\"SS15 5GX\", \"CM11 2ER\", \"SS13 2EA\", \"CM11 2JX\", \"CM11 2AD\", \"CM12 9JJ\", \"SS15", "[\"2021-05-06\"] csv_delimiter = \",\" def address_record_to_dict(self, record): uprn = record.property_urn.strip().lstrip(\"0\") if uprn in", "# CEDAR COTTAGE, LOWER DUNTON ROAD, BULPHAN, UPMINSTER \"10090682049\", # FERNDALE, TYE COMMON", "\"CM11 2AD\", \"CM12 9JJ\", \"SS15 6GJ\", \"SS15 6PF\", \"SS13 3EA\", \"CM11 1HH\", \"SS15", "CHURCH ROAD, LAINDON, BASILDON \"100090273531\", # 23 GREENS FARM LANE, BILLERICAY ]: return", "\"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections = [\"2021-05-06\"] csv_delimiter = \",\" def address_record_to_dict(self, record):", "PROBATION OFFICE, 1 FELMORES, BASILDON \"10013352273\", # 17 CHURCH ROAD, LAINDON, BASILDON \"100090273531\",", "stations_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections = [\"2021-05-06\"] csv_delimiter = \",\" def address_record_to_dict(self, record): uprn", "CEDAR COTTAGE, LOWER DUNTON ROAD, BULPHAN, UPMINSTER \"10090682049\", # FERNDALE, TYE COMMON ROAD,", "# PROBATION OFFICE, 1 FELMORES, BASILDON \"10013352273\", # 17 CHURCH ROAD, LAINDON, BASILDON", "2ER\", \"SS13 2EA\", \"CM11 2JX\", \"CM11 2AD\", \"CM12 9JJ\", \"SS15 6GJ\", \"SS15 6PF\",", "# FLAT 1, ST. DAVIDS COURT, LONDON ROAD, BASILDON \"100091212751\", # PROBATION OFFICE,", "None if record.addressline6 in [ \"SS12 0AU\", \"SS15 5GX\", \"CM11 2ER\", \"SS13 2EA\",", "LOWER DUNTON ROAD, BULPHAN, UPMINSTER \"10090682049\", # FERNDALE, TYE COMMON ROAD, BILLERICAY \"100090239089\",", "\"SS16 5PW\", \"SS13 2LG\", \"SS16 6PH\", \"SS12 9LE\", \"SS14 3RZ\", ]: return None", "uprn in [ \"100091587330\", # CEDAR COTTAGE, LOWER DUNTON ROAD, BULPHAN, UPMINSTER \"10090682049\",", "Command(BaseXpressDemocracyClubCsvImporter): council_id = \"BAI\" addresses_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections = [\"2021-05-06\"]", "FELMORES, BASILDON \"10013352273\", # 17 CHURCH ROAD, LAINDON, BASILDON \"100090273531\", # 23 GREENS", "ROAD, LAINDON, BASILDON \"100090273531\", # 23 GREENS FARM LANE, BILLERICAY ]: return None", "BILLERICAY \"100090239089\", # FLAT 1, ST. DAVIDS COURT, LONDON ROAD, BASILDON \"100091212751\", #", "record.property_urn.strip().lstrip(\"0\") if uprn in [ \"100091587330\", # CEDAR COTTAGE, LOWER DUNTON ROAD, BULPHAN,", "FARM LANE, BILLERICAY ]: return None if record.addressline6 in [ \"SS12 0AU\", \"SS15", "# FERNDALE, TYE COMMON ROAD, BILLERICAY \"100090239089\", # FLAT 1, ST. DAVIDS COURT,", "5GX\", \"CM11 2ER\", \"SS13 2EA\", \"CM11 2JX\", \"CM11 2AD\", \"CM12 9JJ\", \"SS15 6GJ\",", "\"100091212751\", # PROBATION OFFICE, 1 FELMORES, BASILDON \"10013352273\", # 17 CHURCH ROAD, LAINDON,", "17 CHURCH ROAD, LAINDON, BASILDON \"100090273531\", # 23 GREENS FARM LANE, BILLERICAY ]:", "\"CM11 2RU\", \"SS16 5PW\", \"SS13 2LG\", \"SS16 6PH\", \"SS12 9LE\", \"SS14 3RZ\", ]:", "\",\" def address_record_to_dict(self, record): uprn = record.property_urn.strip().lstrip(\"0\") if uprn in [ \"100091587330\", #", "= record.property_urn.strip().lstrip(\"0\") if uprn in [ \"100091587330\", # CEDAR COTTAGE, LOWER DUNTON ROAD,", "\"100090239089\", # FLAT 1, ST. 
DAVIDS COURT, LONDON ROAD, BASILDON \"100091212751\", # PROBATION", "\"SS15 5NZ\", \"CM11 2RU\", \"SS16 5PW\", \"SS13 2LG\", \"SS16 6PH\", \"SS12 9LE\", \"SS14", "\"SS15 6PF\", \"SS13 3EA\", \"CM11 1HH\", \"SS15 5NZ\", \"CM11 2RU\", \"SS16 5PW\", \"SS13", "5PW\", \"SS13 2LG\", \"SS16 6PH\", \"SS12 9LE\", \"SS14 3RZ\", ]: return None return", "23 GREENS FARM LANE, BILLERICAY ]: return None if record.addressline6 in [ \"SS12", "if uprn in [ \"100091587330\", # CEDAR COTTAGE, LOWER DUNTON ROAD, BULPHAN, UPMINSTER", "2JX\", \"CM11 2AD\", \"CM12 9JJ\", \"SS15 6GJ\", \"SS15 6PF\", \"SS13 3EA\", \"CM11 1HH\",", "1HH\", \"SS15 5NZ\", \"CM11 2RU\", \"SS16 5PW\", \"SS13 2LG\", \"SS16 6PH\", \"SS12 9LE\",", "return None if record.addressline6 in [ \"SS12 0AU\", \"SS15 5GX\", \"CM11 2ER\", \"SS13", "ROAD, BILLERICAY \"100090239089\", # FLAT 1, ST. DAVIDS COURT, LONDON ROAD, BASILDON \"100091212751\",", "LANE, BILLERICAY ]: return None if record.addressline6 in [ \"SS12 0AU\", \"SS15 5GX\",", "[ \"SS12 0AU\", \"SS15 5GX\", \"CM11 2ER\", \"SS13 2EA\", \"CM11 2JX\", \"CM11 2AD\",", "in [ \"SS12 0AU\", \"SS15 5GX\", \"CM11 2ER\", \"SS13 2EA\", \"CM11 2JX\", \"CM11", "9JJ\", \"SS15 6GJ\", \"SS15 6PF\", \"SS13 3EA\", \"CM11 1HH\", \"SS15 5NZ\", \"CM11 2RU\",", "2AD\", \"CM12 9JJ\", \"SS15 6GJ\", \"SS15 6PF\", \"SS13 3EA\", \"CM11 1HH\", \"SS15 5NZ\",", "= \"BAI\" addresses_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections = [\"2021-05-06\"] csv_delimiter =", "DUNTON ROAD, BULPHAN, UPMINSTER \"10090682049\", # FERNDALE, TYE COMMON ROAD, BILLERICAY \"100090239089\", #", "record.addressline6 in [ \"SS12 0AU\", \"SS15 5GX\", \"CM11 2ER\", \"SS13 2EA\", \"CM11 2JX\",", "FERNDALE, TYE COMMON ROAD, BILLERICAY \"100090239089\", # FLAT 1, ST. DAVIDS COURT, LONDON", "[ \"100091587330\", # CEDAR COTTAGE, LOWER DUNTON ROAD, BULPHAN, UPMINSTER \"10090682049\", # FERNDALE,", "BASILDON \"100090273531\", # 23 GREENS FARM LANE, BILLERICAY ]: return None if record.addressline6", "OFFICE, 1 FELMORES, BASILDON \"10013352273\", # 17 CHURCH ROAD, LAINDON, BASILDON \"100090273531\", #", "COMMON ROAD, BILLERICAY \"100090239089\", # FLAT 1, ST. DAVIDS COURT, LONDON ROAD, BASILDON", "3EA\", \"CM11 1HH\", \"SS15 5NZ\", \"CM11 2RU\", \"SS16 5PW\", \"SS13 2LG\", \"SS16 6PH\",", "FLAT 1, ST. 
DAVIDS COURT, LONDON ROAD, BASILDON \"100091212751\", # PROBATION OFFICE, 1", "data_importers.management.commands import BaseXpressDemocracyClubCsvImporter class Command(BaseXpressDemocracyClubCsvImporter): council_id = \"BAI\" addresses_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name =", "in [ \"100091587330\", # CEDAR COTTAGE, LOWER DUNTON ROAD, BULPHAN, UPMINSTER \"10090682049\", #", "1 FELMORES, BASILDON \"10013352273\", # 17 CHURCH ROAD, LAINDON, BASILDON \"100090273531\", # 23", "from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter class Command(BaseXpressDemocracyClubCsvImporter): council_id = \"BAI\" addresses_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name", "def address_record_to_dict(self, record): uprn = record.property_urn.strip().lstrip(\"0\") if uprn in [ \"100091587330\", # CEDAR", "= \",\" def address_record_to_dict(self, record): uprn = record.property_urn.strip().lstrip(\"0\") if uprn in [ \"100091587330\",", "]: return None if record.addressline6 in [ \"SS12 0AU\", \"SS15 5GX\", \"CM11 2ER\",", "DAVIDS COURT, LONDON ROAD, BASILDON \"100091212751\", # PROBATION OFFICE, 1 FELMORES, BASILDON \"10013352273\",", "import BaseXpressDemocracyClubCsvImporter class Command(BaseXpressDemocracyClubCsvImporter): council_id = \"BAI\" addresses_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\"", "2EA\", \"CM11 2JX\", \"CM11 2AD\", \"CM12 9JJ\", \"SS15 6GJ\", \"SS15 6PF\", \"SS13 3EA\",", "record): uprn = record.property_urn.strip().lstrip(\"0\") if uprn in [ \"100091587330\", # CEDAR COTTAGE, LOWER", "5NZ\", \"CM11 2RU\", \"SS16 5PW\", \"SS13 2LG\", \"SS16 6PH\", \"SS12 9LE\", \"SS14 3RZ\",", "BASILDON \"10013352273\", # 17 CHURCH ROAD, LAINDON, BASILDON \"100090273531\", # 23 GREENS FARM", "\"CM11 2ER\", \"SS13 2EA\", \"CM11 2JX\", \"CM11 2AD\", \"CM12 9JJ\", \"SS15 6GJ\", \"SS15", "= \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections = [\"2021-05-06\"] csv_delimiter = \",\" def address_record_to_dict(self, record): uprn =", "elections = [\"2021-05-06\"] csv_delimiter = \",\" def address_record_to_dict(self, record): uprn = record.property_urn.strip().lstrip(\"0\") if", "\"100090273531\", # 23 GREENS FARM LANE, BILLERICAY ]: return None if record.addressline6 in", "1, ST. 
DAVIDS COURT, LONDON ROAD, BASILDON \"100091212751\", # PROBATION OFFICE, 1 FELMORES,", "\"SS13 2EA\", \"CM11 2JX\", \"CM11 2AD\", \"CM12 9JJ\", \"SS15 6GJ\", \"SS15 6PF\", \"SS13", "\"10013352273\", # 17 CHURCH ROAD, LAINDON, BASILDON \"100090273531\", # 23 GREENS FARM LANE,", "6PF\", \"SS13 3EA\", \"CM11 1HH\", \"SS15 5NZ\", \"CM11 2RU\", \"SS16 5PW\", \"SS13 2LG\",", "BILLERICAY ]: return None if record.addressline6 in [ \"SS12 0AU\", \"SS15 5GX\", \"CM11", "= [\"2021-05-06\"] csv_delimiter = \",\" def address_record_to_dict(self, record): uprn = record.property_urn.strip().lstrip(\"0\") if uprn", "council_id = \"BAI\" addresses_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections = [\"2021-05-06\"] csv_delimiter", "# 23 GREENS FARM LANE, BILLERICAY ]: return None if record.addressline6 in [", "\"CM11 2JX\", \"CM11 2AD\", \"CM12 9JJ\", \"SS15 6GJ\", \"SS15 6PF\", \"SS13 3EA\", \"CM11", "UPMINSTER \"10090682049\", # FERNDALE, TYE COMMON ROAD, BILLERICAY \"100090239089\", # FLAT 1, ST.", "COURT, LONDON ROAD, BASILDON \"100091212751\", # PROBATION OFFICE, 1 FELMORES, BASILDON \"10013352273\", #", "uprn = record.property_urn.strip().lstrip(\"0\") if uprn in [ \"100091587330\", # CEDAR COTTAGE, LOWER DUNTON", "\"SS15 6GJ\", \"SS15 6PF\", \"SS13 3EA\", \"CM11 1HH\", \"SS15 5NZ\", \"CM11 2RU\", \"SS16", "ST. DAVIDS COURT, LONDON ROAD, BASILDON \"100091212751\", # PROBATION OFFICE, 1 FELMORES, BASILDON", "\"100091587330\", # CEDAR COTTAGE, LOWER DUNTON ROAD, BULPHAN, UPMINSTER \"10090682049\", # FERNDALE, TYE", "= \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" stations_name = \"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections = [\"2021-05-06\"] csv_delimiter = \",\" def address_record_to_dict(self,", "csv_delimiter = \",\" def address_record_to_dict(self, record): uprn = record.property_urn.strip().lstrip(\"0\") if uprn in [", "BASILDON \"100091212751\", # PROBATION OFFICE, 1 FELMORES, BASILDON \"10013352273\", # 17 CHURCH ROAD,", "\"2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv\" elections = [\"2021-05-06\"] csv_delimiter = \",\" def address_record_to_dict(self, record): uprn = record.property_urn.strip().lstrip(\"0\")", "\"SS13 2LG\", \"SS16 6PH\", \"SS12 9LE\", \"SS14 3RZ\", ]: return None return super().address_record_to_dict(record)", "if record.addressline6 in [ \"SS12 0AU\", \"SS15 5GX\", \"CM11 2ER\", \"SS13 2EA\", \"CM11", "6GJ\", \"SS15 6PF\", \"SS13 3EA\", \"CM11 1HH\", \"SS15 5NZ\", \"CM11 2RU\", \"SS16 5PW\"," ]
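In this importer, returning None from address_record_to_dict signals the base importer to drop the record, which is how rows with known-bad UPRNs or postcodes are excluded. A sketch of that filtering pattern in isolation — the Record type and the two-element sets are illustrative stand-ins, not the importer's real API:

from collections import namedtuple

# Hypothetical stand-in for the CSV row the importer receives.
Record = namedtuple("Record", ["property_urn", "addressline6"])

BAD_UPRNS = {"100091587330", "10090682049"}  # subset, for illustration
BAD_POSTCODES = {"SS12 0AU", "SS15 5GX"}     # subset, for illustration


def should_skip(record):
    # Mirror the importer: normalise the UPRN, then check both deny-lists.
    uprn = record.property_urn.strip().lstrip("0")
    return uprn in BAD_UPRNS or record.addressline6 in BAD_POSTCODES


assert should_skip(Record("0100091587330", "CM12 9JJ"))    # leading zero stripped
assert should_skip(Record("12345678901", "SS15 5GX"))      # bad postcode
assert not should_skip(Record("12345678901", "CM99 9ZZ"))  # kept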
[ "= (self.cursor[position] + 1) % self.dataset_size return sequence def next(self): '''Generate next batch", "dict(zip(range(len(words)), words)) def convert_words_to_wordids(self): self.wordids = [self.words2id[word] for word in self.words] def convert_wordids_to_words(self,", "+ '.' + dataset_type + '.txt') self.load_dataset() def load_dataset(self): self.load() self.build_vocabulary() self.convert_words_to_wordids() self.data", "def build_vocabulary(self): counter = Counter(self.words) count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) words,", "= sorted(counter.items(), key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) self.words2id = dict(zip(words,", "= config.batch_size self.num_unrollings = config.num_unrollings self.batch_dataset_type = config.batch_dataset_type self.load_dataset() self.dataset_size = dataset_size =", "as np import tensorflow as tf from collections import Counter class Dataset(): '''Load", "self.cursor[position] = (self.cursor[position] + 1) % self.dataset_size return sequence def next(self): '''Generate next", "= config.num_unrollings self.batch_dataset_type = config.batch_dataset_type self.load_dataset() self.dataset_size = dataset_size = len(self.data) segment =", "load(self): '''Reading dataset as a list of words''' with open(self.file_name, 'rb') as f:", "__init__(self, config, dataset_type): self.config = config self.file_name = os.path.join(config.dataset_dir, config.dataset + '.' +", "words class BatchGenerator(): '''Generate Batches''' def __init__(self, config): self.config = config self.batch_size =", "id in wordids] return words class BatchGenerator(): '''Generate Batches''' def __init__(self, config): self.config", "batch from the data''' batch = [] for position in xrange(self.batch_size): batch.append(self.sequence(position)) return", "word in self.words] def convert_wordids_to_words(self, wordids): words = [self.id2words[id] for id in wordids]", "build_vocabulary(self): counter = Counter(self.words) count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) words, _", "xrange(self.num_unrollings + 1): sequence.append(self.data[self.cursor[position]]) self.cursor[position] = (self.cursor[position] + 1) % self.dataset_size return sequence", "in self.words] def convert_wordids_to_words(self, wordids): words = [self.id2words[id] for id in wordids] return", "(-x[1], x[0])) words, _ = list(zip(*count_pairs)) self.words2id = dict(zip(words, range(len(words)))) self.id2words = dict(zip(range(len(words)),", "offset in xrange(batch_size)] def load_dataset(self): dataset = Dataset(self.config, self.batch_dataset_type) self.data = dataset.data def", "import Counter class Dataset(): '''Load dataset''' def __init__(self, config, dataset_type): self.config = config", "self.load_dataset() def load_dataset(self): self.load() self.build_vocabulary() self.convert_words_to_wordids() self.data = self.wordids def load(self): '''Reading dataset", "of words''' with open(self.file_name, 'rb') as f: words = tf.compat.as_str(f.read()).split() self.words = words", "segment = dataset_size / batch_size self.cursor = [offset * segment for offset in", "sorted(counter.items(), key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) self.words2id = dict(zip(words, range(len(words))))", "'''Generate Batches''' def __init__(self, config): self.config = config self.batch_size = batch_size = config.batch_size", "= tf.compat.as_str(f.read()).split() self.words = words def build_vocabulary(self): counter = Counter(self.words) count_pairs = 
sorted(counter.items(),", "= self.wordids def load(self): '''Reading dataset as a list of words''' with open(self.file_name,", "config): self.config = config self.batch_size = batch_size = config.batch_size self.num_unrollings = config.num_unrollings self.batch_dataset_type", "self.words2id = dict(zip(words, range(len(words)))) self.id2words = dict(zip(range(len(words)), words)) def convert_words_to_wordids(self): self.wordids = [self.words2id[word]", "xrange(batch_size)] def load_dataset(self): dataset = Dataset(self.config, self.batch_dataset_type) self.data = dataset.data def sequence(self, position):", "= batch_size = config.batch_size self.num_unrollings = config.num_unrollings self.batch_dataset_type = config.batch_dataset_type self.load_dataset() self.dataset_size =", "[self.id2words[id] for id in wordids] return words class BatchGenerator(): '''Generate Batches''' def __init__(self,", "for word in self.words] def convert_wordids_to_words(self, wordids): words = [self.id2words[id] for id in", "sequence from a cursor position''' sequence = [] for _ in xrange(self.num_unrollings +", "% self.dataset_size return sequence def next(self): '''Generate next batch from the data''' batch", "self.convert_words_to_wordids() self.data = self.wordids def load(self): '''Reading dataset as a list of words'''", "Counter(self.words) count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) self.words2id", "words''' with open(self.file_name, 'rb') as f: words = tf.compat.as_str(f.read()).split() self.words = words def", "class Dataset(): '''Load dataset''' def __init__(self, config, dataset_type): self.config = config self.file_name =", "= Counter(self.words) count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs))", "def __init__(self, config): self.config = config self.batch_size = batch_size = config.batch_size self.num_unrollings =", "(self.cursor[position] + 1) % self.dataset_size return sequence def next(self): '''Generate next batch from", "as a list of words''' with open(self.file_name, 'rb') as f: words = tf.compat.as_str(f.read()).split()", "= Dataset(self.config, self.batch_dataset_type) self.data = dataset.data def sequence(self, position): '''Generate a sequence from", "= len(self.data) segment = dataset_size / batch_size self.cursor = [offset * segment for", "'''Generate next batch from the data''' batch = [] for position in xrange(self.batch_size):", "self.wordids def load(self): '''Reading dataset as a list of words''' with open(self.file_name, 'rb')", "tf.compat.as_str(f.read()).split() self.words = words def build_vocabulary(self): counter = Counter(self.words) count_pairs = sorted(counter.items(), key=lambda", "dict(zip(words, range(len(words)))) self.id2words = dict(zip(range(len(words)), words)) def convert_words_to_wordids(self): self.wordids = [self.words2id[word] for word", "= [self.id2words[id] for id in wordids] return words class BatchGenerator(): '''Generate Batches''' def", "'.txt') self.load_dataset() def load_dataset(self): self.load() self.build_vocabulary() self.convert_words_to_wordids() self.data = self.wordids def load(self): '''Reading", "words)) def convert_words_to_wordids(self): self.wordids = [self.words2id[word] for word in self.words] def convert_wordids_to_words(self, wordids):", "x[0])) words, _ = list(zip(*count_pairs)) self.words2id = dict(zip(words, range(len(words)))) self.id2words = dict(zip(range(len(words)), words))", "convert_wordids_to_words(self, wordids): 
words = [self.id2words[id] for id in wordids] return words class BatchGenerator():", "import os import zipfile import numpy as np import tensorflow as tf from", "words = tf.compat.as_str(f.read()).split() self.words = words def build_vocabulary(self): counter = Counter(self.words) count_pairs =", "BatchGenerator(): '''Generate Batches''' def __init__(self, config): self.config = config self.batch_size = batch_size =", "= config self.batch_size = batch_size = config.batch_size self.num_unrollings = config.num_unrollings self.batch_dataset_type = config.batch_dataset_type", "[offset * segment for offset in xrange(batch_size)] def load_dataset(self): dataset = Dataset(self.config, self.batch_dataset_type)", "key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) self.words2id = dict(zip(words, range(len(words)))) self.id2words", "in xrange(self.num_unrollings + 1): sequence.append(self.data[self.cursor[position]]) self.cursor[position] = (self.cursor[position] + 1) % self.dataset_size return", "self.build_vocabulary() self.convert_words_to_wordids() self.data = self.wordids def load(self): '''Reading dataset as a list of", "convert_words_to_wordids(self): self.wordids = [self.words2id[word] for word in self.words] def convert_wordids_to_words(self, wordids): words =", "range(len(words)))) self.id2words = dict(zip(range(len(words)), words)) def convert_words_to_wordids(self): self.wordids = [self.words2id[word] for word in", "Counter class Dataset(): '''Load dataset''' def __init__(self, config, dataset_type): self.config = config self.file_name", "os.path.join(config.dataset_dir, config.dataset + '.' + dataset_type + '.txt') self.load_dataset() def load_dataset(self): self.load() self.build_vocabulary()", "a list of words''' with open(self.file_name, 'rb') as f: words = tf.compat.as_str(f.read()).split() self.words", "self.num_unrollings = config.num_unrollings self.batch_dataset_type = config.batch_dataset_type self.load_dataset() self.dataset_size = dataset_size = len(self.data) segment", "= dict(zip(words, range(len(words)))) self.id2words = dict(zip(range(len(words)), words)) def convert_words_to_wordids(self): self.wordids = [self.words2id[word] for", "import numpy as np import tensorflow as tf from collections import Counter class", "for _ in xrange(self.num_unrollings + 1): sequence.append(self.data[self.cursor[position]]) self.cursor[position] = (self.cursor[position] + 1) %", "config.num_unrollings self.batch_dataset_type = config.batch_dataset_type self.load_dataset() self.dataset_size = dataset_size = len(self.data) segment = dataset_size", "config self.batch_size = batch_size = config.batch_size self.num_unrollings = config.num_unrollings self.batch_dataset_type = config.batch_dataset_type self.load_dataset()", "self.config = config self.batch_size = batch_size = config.batch_size self.num_unrollings = config.num_unrollings self.batch_dataset_type =", "wordids): words = [self.id2words[id] for id in wordids] return words class BatchGenerator(): '''Generate", "count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) self.words2id =", "def load_dataset(self): self.load() self.build_vocabulary() self.convert_words_to_wordids() self.data = self.wordids def load(self): '''Reading dataset as", "dataset = Dataset(self.config, self.batch_dataset_type) self.data = dataset.data def sequence(self, position): '''Generate a sequence", "[self.words2id[word] for word in self.words] def convert_wordids_to_words(self, wordids): words = 
[self.id2words[id] for id", "next batch from the data''' batch = [] for position in xrange(self.batch_size): batch.append(self.sequence(position))", "with open(self.file_name, 'rb') as f: words = tf.compat.as_str(f.read()).split() self.words = words def build_vocabulary(self):", "self.words] def convert_wordids_to_words(self, wordids): words = [self.id2words[id] for id in wordids] return words", "next(self): '''Generate next batch from the data''' batch = [] for position in", "from collections import Counter class Dataset(): '''Load dataset''' def __init__(self, config, dataset_type): self.config", "class BatchGenerator(): '''Generate Batches''' def __init__(self, config): self.config = config self.batch_size = batch_size", "= dataset.data def sequence(self, position): '''Generate a sequence from a cursor position''' sequence", "from the data''' batch = [] for position in xrange(self.batch_size): batch.append(self.sequence(position)) return np.array(batch)", "a sequence from a cursor position''' sequence = [] for _ in xrange(self.num_unrollings", "/ batch_size self.cursor = [offset * segment for offset in xrange(batch_size)] def load_dataset(self):", "list of words''' with open(self.file_name, 'rb') as f: words = tf.compat.as_str(f.read()).split() self.words =", "self.batch_dataset_type) self.data = dataset.data def sequence(self, position): '''Generate a sequence from a cursor", "sequence.append(self.data[self.cursor[position]]) self.cursor[position] = (self.cursor[position] + 1) % self.dataset_size return sequence def next(self): '''Generate", "'''Reading dataset as a list of words''' with open(self.file_name, 'rb') as f: words", "self.words = words def build_vocabulary(self): counter = Counter(self.words) count_pairs = sorted(counter.items(), key=lambda x:", "self.batch_size = batch_size = config.batch_size self.num_unrollings = config.num_unrollings self.batch_dataset_type = config.batch_dataset_type self.load_dataset() self.dataset_size", "def sequence(self, position): '''Generate a sequence from a cursor position''' sequence = []", "[] for _ in xrange(self.num_unrollings + 1): sequence.append(self.data[self.cursor[position]]) self.cursor[position] = (self.cursor[position] + 1)", "self.data = dataset.data def sequence(self, position): '''Generate a sequence from a cursor position'''", "= list(zip(*count_pairs)) self.words2id = dict(zip(words, range(len(words)))) self.id2words = dict(zip(range(len(words)), words)) def convert_words_to_wordids(self): self.wordids", "import zipfile import numpy as np import tensorflow as tf from collections import", "= os.path.join(config.dataset_dir, config.dataset + '.' 
+ dataset_type + '.txt') self.load_dataset() def load_dataset(self): self.load()", "def load_dataset(self): dataset = Dataset(self.config, self.batch_dataset_type) self.data = dataset.data def sequence(self, position): '''Generate", "from a cursor position''' sequence = [] for _ in xrange(self.num_unrollings + 1):", "sequence = [] for _ in xrange(self.num_unrollings + 1): sequence.append(self.data[self.cursor[position]]) self.cursor[position] = (self.cursor[position]", "= config.batch_dataset_type self.load_dataset() self.dataset_size = dataset_size = len(self.data) segment = dataset_size / batch_size", "for id in wordids] return words class BatchGenerator(): '''Generate Batches''' def __init__(self, config):", "x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) self.words2id = dict(zip(words, range(len(words)))) self.id2words =", "= dict(zip(range(len(words)), words)) def convert_words_to_wordids(self): self.wordids = [self.words2id[word] for word in self.words] def", "words, _ = list(zip(*count_pairs)) self.words2id = dict(zip(words, range(len(words)))) self.id2words = dict(zip(range(len(words)), words)) def", "list(zip(*count_pairs)) self.words2id = dict(zip(words, range(len(words)))) self.id2words = dict(zip(range(len(words)), words)) def convert_words_to_wordids(self): self.wordids =", "1): sequence.append(self.data[self.cursor[position]]) self.cursor[position] = (self.cursor[position] + 1) % self.dataset_size return sequence def next(self):", "sequence def next(self): '''Generate next batch from the data''' batch = [] for", "open(self.file_name, 'rb') as f: words = tf.compat.as_str(f.read()).split() self.words = words def build_vocabulary(self): counter", "dataset_type + '.txt') self.load_dataset() def load_dataset(self): self.load() self.build_vocabulary() self.convert_words_to_wordids() self.data = self.wordids def", "+ '.txt') self.load_dataset() def load_dataset(self): self.load() self.build_vocabulary() self.convert_words_to_wordids() self.data = self.wordids def load(self):", "dataset_type): self.config = config self.file_name = os.path.join(config.dataset_dir, config.dataset + '.' 
<reponame>reetawwsum/Language-Model
import os
import zipfile
import numpy as np
import tensorflow as tf
from collections import Counter

class Dataset():
    '''Load dataset'''
    def __init__(self, config, dataset_type):
        self.config = config
        self.file_name = os.path.join(config.dataset_dir, config.dataset + '.' + dataset_type + '.txt')
        self.load_dataset()

    def load_dataset(self):
        self.load()
        self.build_vocabulary()
        self.convert_words_to_wordids()
        self.data = self.wordids

    def load(self):
        '''Reading dataset as a list of words'''
        with open(self.file_name, 'rb') as f:
            words = tf.compat.as_str(f.read()).split()
        self.words = words

    def build_vocabulary(self):
        counter = Counter(self.words)
        count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
        words, _ = list(zip(*count_pairs))
        self.words2id = dict(zip(words, range(len(words))))
        self.id2words = dict(zip(range(len(words)), words))

    def convert_words_to_wordids(self):
        self.wordids = [self.words2id[word] for word in self.words]

    def convert_wordids_to_words(self, wordids):
        words = [self.id2words[id] for id in wordids]
        return words

class BatchGenerator():
    '''Generate Batches'''
    def __init__(self, config):
        self.config = config
        self.batch_size = batch_size = config.batch_size
        self.num_unrollings = config.num_unrollings
        self.batch_dataset_type = config.batch_dataset_type
        self.load_dataset()
        self.dataset_size = dataset_size = len(self.data)
        segment = dataset_size / batch_size
        self.cursor = [offset * segment for offset in xrange(batch_size)]

    def load_dataset(self):
        dataset = Dataset(self.config, self.batch_dataset_type)
        self.data = dataset.data

    def sequence(self, position):
        '''Generate a sequence from a cursor position'''
        sequence = []
        for _ in xrange(self.num_unrollings + 1):
            sequence.append(self.data[self.cursor[position]])
            self.cursor[position] = (self.cursor[position] + 1) % self.dataset_size
        return sequence

    def next(self):
        '''Generate next batch from the data'''
        batch = []
        for position ...
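The `BatchGenerator` above uses segmented-cursor batching for unrolled sequence training: the data is split into `batch_size` equal segments, one cursor starts at the head of each segment, and every `sequence` call advances its cursor, so row i of successive batches walks contiguously through segment i. A minimal self-contained sketch of the same idea, with toy data and Python 3 `range`/`//` standing in for the file's Python 2 `xrange` and integer `/`:

# Sketch of segmented-cursor batching; `data` and the sizes are toy
# stand-ins, not the repository's real Config/Dataset objects.
data = list(range(100))            # pretend word ids
batch_size, num_unrollings = 4, 3
dataset_size = len(data)
segment = dataset_size // batch_size
cursor = [offset * segment for offset in range(batch_size)]

def sequence(position):
    # Emit num_unrollings + 1 consecutive ids, wrapping at the end.
    seq = []
    for _ in range(num_unrollings + 1):
        seq.append(data[cursor[position]])
        cursor[position] = (cursor[position] + 1) % dataset_size
    return seq

batch = [sequence(position) for position in range(batch_size)]
print(batch)  # [[0, 1, 2, 3], [25, 26, 27, 28], [50, 51, 52, 53], [75, 76, 77, 78]]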
[ "test_neal_funnel(config=Config()): test_neal_funnel = MCMCTest( name='neal_funnel', model_file='good/neal_funnel.stan', config=config ) return test_neal_funnel.run() if __name__ ==", "<filename>test/integration/dppl/inference/test_neal_funnel.py from .harness import MCMCTest, Config from pprint import pprint def test_neal_funnel(config=Config()): test_neal_funnel", "import pprint def test_neal_funnel(config=Config()): test_neal_funnel = MCMCTest( name='neal_funnel', model_file='good/neal_funnel.stan', config=config ) return test_neal_funnel.run()", ".harness import MCMCTest, Config from pprint import pprint def test_neal_funnel(config=Config()): test_neal_funnel = MCMCTest(", "from .harness import MCMCTest, Config from pprint import pprint def test_neal_funnel(config=Config()): test_neal_funnel =", "test_neal_funnel = MCMCTest( name='neal_funnel', model_file='good/neal_funnel.stan', config=config ) return test_neal_funnel.run() if __name__ == \"__main__\":", "pprint import pprint def test_neal_funnel(config=Config()): test_neal_funnel = MCMCTest( name='neal_funnel', model_file='good/neal_funnel.stan', config=config ) return", "Config from pprint import pprint def test_neal_funnel(config=Config()): test_neal_funnel = MCMCTest( name='neal_funnel', model_file='good/neal_funnel.stan', config=config", "import MCMCTest, Config from pprint import pprint def test_neal_funnel(config=Config()): test_neal_funnel = MCMCTest( name='neal_funnel',", "from pprint import pprint def test_neal_funnel(config=Config()): test_neal_funnel = MCMCTest( name='neal_funnel', model_file='good/neal_funnel.stan', config=config )", "MCMCTest, Config from pprint import pprint def test_neal_funnel(config=Config()): test_neal_funnel = MCMCTest( name='neal_funnel', model_file='good/neal_funnel.stan',", "def test_neal_funnel(config=Config()): test_neal_funnel = MCMCTest( name='neal_funnel', model_file='good/neal_funnel.stan', config=config ) return test_neal_funnel.run() if __name__", "pprint def test_neal_funnel(config=Config()): test_neal_funnel = MCMCTest( name='neal_funnel', model_file='good/neal_funnel.stan', config=config ) return test_neal_funnel.run() if", "= MCMCTest( name='neal_funnel', model_file='good/neal_funnel.stan', config=config ) return test_neal_funnel.run() if __name__ == \"__main__\": pprint(test_neal_funnel())" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "\"pypi_black\", strip_prefix = \"black-19.3b0\", urls = [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content = _BLACK_PY_BUILD_FILE, ) def _maybe(repo_rule,", "name, strip_repo_prefix = \"\", **kwargs): if not name.startswith(strip_repo_prefix): return repo_name = name[len(strip_repo_prefix):] if", "may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # #", "return repo_name = name[len(strip_repo_prefix):] if repo_name in native.existing_rules(): return repo_rule(name = repo_name, **kwargs)", "= \"\", **kwargs): if not name.startswith(strip_repo_prefix): return repo_name = name[len(strip_repo_prefix):] if repo_name in", "License. # You may obtain a copy of the License at # #", ") def _maybe(repo_rule, name, strip_repo_prefix = \"\", **kwargs): if not name.startswith(strip_repo_prefix): return repo_name", "strip_repo_prefix = \"\", **kwargs): if not name.startswith(strip_repo_prefix): return repo_name = name[len(strip_repo_prefix):] if repo_name", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "\"\"\" def py_gapic_repositories(): _maybe( http_archive, name = \"pypi_black\", strip_prefix = \"black-19.3b0\", urls =", "compliance with the License. # You may obtain a copy of the License", "= glob([\"**/*.py\"]), visibility = [\"//visibility:public\"], ) \"\"\" def py_gapic_repositories(): _maybe( http_archive, name =", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. # You may obtain a", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\") _BLACK_PY_BUILD_FILE = \"\"\" py_binary( name = \"black\", srcs = glob([\"**/*.py\"]), visibility", "**kwargs): if not name.startswith(strip_repo_prefix): return repo_name = name[len(strip_repo_prefix):] if repo_name in native.existing_rules(): return", "you may not use this file except in compliance with the License. #", "governing permissions and # limitations under the License. load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\") _BLACK_PY_BUILD_FILE = \"\"\"", "# You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0", "# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "\"http_archive\") _BLACK_PY_BUILD_FILE = \"\"\" py_binary( name = \"black\", srcs = glob([\"**/*.py\"]), visibility =", "Google LLC # # Licensed under the Apache License, Version 2.0 (the \"License\");", "ANY KIND, either express or implied. 
# See the License for the specific", "name = \"pypi_black\", strip_prefix = \"black-19.3b0\", urls = [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content = _BLACK_PY_BUILD_FILE, )", "and # limitations under the License. load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\") _BLACK_PY_BUILD_FILE = \"\"\" py_binary( name", "srcs = glob([\"**/*.py\"]), visibility = [\"//visibility:public\"], ) \"\"\" def py_gapic_repositories(): _maybe( http_archive, name", "def py_gapic_repositories(): _maybe( http_archive, name = \"pypi_black\", strip_prefix = \"black-19.3b0\", urls = [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"],", "in compliance with the License. # You may obtain a copy of the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the", "use this file except in compliance with the License. # You may obtain", "= \"pypi_black\", strip_prefix = \"black-19.3b0\", urls = [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content = _BLACK_PY_BUILD_FILE, ) def", "not use this file except in compliance with the License. # You may", "License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "build_file_content = _BLACK_PY_BUILD_FILE, ) def _maybe(repo_rule, name, strip_repo_prefix = \"\", **kwargs): if not", "Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0", "under the License. load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\") _BLACK_PY_BUILD_FILE = \"\"\" py_binary( name = \"black\", srcs", "See the License for the specific language governing permissions and # limitations under", "You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 #", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= [\"//visibility:public\"], ) \"\"\" def py_gapic_repositories(): _maybe( http_archive, name = \"pypi_black\", strip_prefix =", "License, Version 2.0 (the \"License\"); # you may not use this file except", "_BLACK_PY_BUILD_FILE, ) def _maybe(repo_rule, name, strip_repo_prefix = \"\", **kwargs): if not name.startswith(strip_repo_prefix): return", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "not name.startswith(strip_repo_prefix): return repo_name = name[len(strip_repo_prefix):] if repo_name in native.existing_rules(): return repo_rule(name =", "copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "OF ANY KIND, either express or implied. 
# See the License for the", "2.0 (the \"License\"); # you may not use this file except in compliance", "# # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "\"\"\" py_binary( name = \"black\", srcs = glob([\"**/*.py\"]), visibility = [\"//visibility:public\"], ) \"\"\"", "https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "# you may not use this file except in compliance with the License.", "of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", ") \"\"\" def py_gapic_repositories(): _maybe( http_archive, name = \"pypi_black\", strip_prefix = \"black-19.3b0\", urls", "py_gapic_repositories(): _maybe( http_archive, name = \"pypi_black\", strip_prefix = \"black-19.3b0\", urls = [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content", "agreed to in writing, software # distributed under the License is distributed on", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless", "(the \"License\"); # you may not use this file except in compliance with", "http_archive, name = \"pypi_black\", strip_prefix = \"black-19.3b0\", urls = [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content = _BLACK_PY_BUILD_FILE,", "_maybe(repo_rule, name, strip_repo_prefix = \"\", **kwargs): if not name.startswith(strip_repo_prefix): return repo_name = name[len(strip_repo_prefix):]", "# limitations under the License. load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\") _BLACK_PY_BUILD_FILE = \"\"\" py_binary( name =", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. # See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "except in compliance with the License. # You may obtain a copy of", "by applicable law or agreed to in writing, software # distributed under the", "LLC # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "\"\", **kwargs): if not name.startswith(strip_repo_prefix): return repo_name = name[len(strip_repo_prefix):] if repo_name in native.existing_rules():", "limitations under the License. load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\") _BLACK_PY_BUILD_FILE = \"\"\" py_binary( name = \"black\",", "def _maybe(repo_rule, name, strip_repo_prefix = \"\", **kwargs): if not name.startswith(strip_repo_prefix): return repo_name =", "either express or implied. # See the License for the specific language governing", "= _BLACK_PY_BUILD_FILE, ) def _maybe(repo_rule, name, strip_repo_prefix = \"\", **kwargs): if not name.startswith(strip_repo_prefix):", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "License. load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\") _BLACK_PY_BUILD_FILE = \"\"\" py_binary( name = \"black\", srcs = glob([\"**/*.py\"]),", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "name.startswith(strip_repo_prefix): return repo_name = name[len(strip_repo_prefix):] if repo_name in native.existing_rules(): return repo_rule(name = repo_name,", "at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "[\"//visibility:public\"], ) \"\"\" def py_gapic_repositories(): _maybe( http_archive, name = \"pypi_black\", strip_prefix = \"black-19.3b0\",", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "_BLACK_PY_BUILD_FILE = \"\"\" py_binary( name = \"black\", srcs = glob([\"**/*.py\"]), visibility = [\"//visibility:public\"],", "glob([\"**/*.py\"]), visibility = [\"//visibility:public\"], ) \"\"\" def py_gapic_repositories(): _maybe( http_archive, name = \"pypi_black\",", "the specific language governing permissions and # limitations under the License. load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\")", "language governing permissions and # limitations under the License. load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\") _BLACK_PY_BUILD_FILE =", "# Copyright 2019 Google LLC # # Licensed under the Apache License, Version", "file except in compliance with the License. # You may obtain a copy", "permissions and # limitations under the License. load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\") _BLACK_PY_BUILD_FILE = \"\"\" py_binary(", "= \"black\", srcs = glob([\"**/*.py\"]), visibility = [\"//visibility:public\"], ) \"\"\" def py_gapic_repositories(): _maybe(", "<filename>python/py_gapic_repositories.bzl<gh_stars>100-1000 # Copyright 2019 Google LLC # # Licensed under the Apache License,", "name = \"black\", srcs = glob([\"**/*.py\"]), visibility = [\"//visibility:public\"], ) \"\"\" def py_gapic_repositories():", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "= \"\"\" py_binary( name = \"black\", srcs = glob([\"**/*.py\"]), visibility = [\"//visibility:public\"], )", "License for the specific language governing permissions and # limitations under the License.", "a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "= \"black-19.3b0\", urls = [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content = _BLACK_PY_BUILD_FILE, ) def _maybe(repo_rule, name, strip_repo_prefix", "the License. 
# You may obtain a copy of the License at #", "\"black\", srcs = glob([\"**/*.py\"]), visibility = [\"//visibility:public\"], ) \"\"\" def py_gapic_repositories(): _maybe( http_archive,", "to in writing, software # distributed under the License is distributed on an", "_maybe( http_archive, name = \"pypi_black\", strip_prefix = \"black-19.3b0\", urls = [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content =", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "[\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content = _BLACK_PY_BUILD_FILE, ) def _maybe(repo_rule, name, strip_repo_prefix = \"\", **kwargs): if", "visibility = [\"//visibility:public\"], ) \"\"\" def py_gapic_repositories(): _maybe( http_archive, name = \"pypi_black\", strip_prefix", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "specific language governing permissions and # limitations under the License. load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\") _BLACK_PY_BUILD_FILE", "required by applicable law or agreed to in writing, software # distributed under", "py_binary( name = \"black\", srcs = glob([\"**/*.py\"]), visibility = [\"//visibility:public\"], ) \"\"\" def", "applicable law or agreed to in writing, software # distributed under the License", "if not name.startswith(strip_repo_prefix): return repo_name = name[len(strip_repo_prefix):] if repo_name in native.existing_rules(): return repo_rule(name", "urls = [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content = _BLACK_PY_BUILD_FILE, ) def _maybe(repo_rule, name, strip_repo_prefix = \"\",", "= [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content = _BLACK_PY_BUILD_FILE, ) def _maybe(repo_rule, name, strip_repo_prefix = \"\", **kwargs):", "or agreed to in writing, software # distributed under the License is distributed", "the License. load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\") _BLACK_PY_BUILD_FILE = \"\"\" py_binary( name = \"black\", srcs =", "or implied. # See the License for the specific language governing permissions and", "strip_prefix = \"black-19.3b0\", urls = [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content = _BLACK_PY_BUILD_FILE, ) def _maybe(repo_rule, name,", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "for the specific language governing permissions and # limitations under the License. 
load(\"@bazel_tools//tools/build_defs/repo:http.bzl\",", "\"black-19.3b0\", urls = [\"https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz\"], build_file_content = _BLACK_PY_BUILD_FILE, ) def _maybe(repo_rule, name, strip_repo_prefix =", "with the License. # You may obtain a copy of the License at", "in writing, software # distributed under the License is distributed on an \"AS", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "from sklearn.model_selection import GridSearchCV from sklearn.neural_network import MLPRegressor from sklearn.model_selection import train_test_split from", "Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y)) print('Grid Search Best Accuracy :',mlp_clf_grid.best_score_)", "print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y)) print('Grid Search Best Accuracy :',mlp_clf_grid.best_score_) print('Best Parameters :", "featnames = np.array( list(map(lambda i: '{:03}'.format(i), range(X.shape[1])))) return X, y, featnames def load_iwpc(data_folder):", "from sklearn.neural_network import MLPRegressor from sklearn.model_selection import train_test_split from sklearn.feature_extraction import DictVectorizer def", "if dataset == 'eyedata': X, y, featnames = load_eyedata(data_folder) if dataset == 'iwpc':", "__name__ == '__main__': data_folder = '../data' parser = argparse.ArgumentParser() parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify", "== 'eyedata': X, y, featnames = load_eyedata(data_folder) if dataset == 'iwpc': X, y,", "import train_test_split from sklearn.feature_extraction import DictVectorizer def load_eyedata(data_folder): datafile = '{}/eyedata.csv'.format(data_folder) data =", "sklearn.neural_network import MLPRegressor from sklearn.model_selection import train_test_split from sklearn.feature_extraction import DictVectorizer def load_eyedata(data_folder):", "i: '{:03}'.format(i), range(X.shape[1])))) return X, y, featnames def load_iwpc(data_folder): datafile = '{}/iwpc-scaled.csv'.format(data_folder) col_types", "float} X, y = [], [] with open(datafile) as csvfile: reader = csv.DictReader(csvfile)", "load_iwpc(data_folder) train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=9) time params = {", "'max_iter':[200,250,300,350] } mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1) mlp_clf_grid.fit(train_X,train_y) print('Train Accuracy :", "featnames if __name__ == '__main__': data_folder = '../data' parser = argparse.ArgumentParser() parser.add_argument('data', choices=['eyedata',", "str, 'age': float, 'height': float, 'weight': float, 'amiodarone': int, 'decr': int, 'cyp2c9': str,", "float, 'height': float, 'weight': float, 'amiodarone': int, 'decr': int, 'cyp2c9': str, 'vkorc1': str,", "dv.fit_transform(X) y = np.array(y) featnames = np.array(dv.get_feature_names()) return X, y, featnames if __name__", "['lbfgs', 'sgd', 'adam'], 'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350] } mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1,", "Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y)) print('Grid Search Best Accuracy :',mlp_clf_grid.best_score_) print('Best Parameters : ',mlp_clf_grid.best_params_)", "data = np.loadtxt(datafile, skiprows=1, delimiter=',') data = scale(data) X, y = data[:, :-1],", "y = np.array(y) featnames = np.array(dv.get_feature_names()) return X, y, featnames if __name__ ==", "'cyp2c9': str, 'vkorc1': str, 'dose': float} X, y = [], [] with open(datafile)", ": ',mlp_clf_grid.best_estimator_.score(test_X, test_y)) print('Grid Search Best Accuracy :',mlp_clf_grid.best_score_) print('Best Parameters : ',mlp_clf_grid.best_params_) print('Best", "= '../data' parser = argparse.ArgumentParser() parser.add_argument('data', 
choices=['eyedata', 'iwpc'], help='Specify the data to use')", "param_grid=params, n_jobs=-1, cv=5, verbose=1) mlp_clf_grid.fit(train_X,train_y) print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X,", "row[col_name] X.append(row) dv = DictVectorizer() X = dv.fit_transform(X) y = np.array(y) featnames =", "'dose': y.append(row[col_name]) del row[col_name] X.append(row) dv = DictVectorizer() X = dv.fit_transform(X) y =", "for col_name in reader.fieldnames: col_type = col_types[col_name] row[col_name] = col_type(row[col_name]) # cast to", "from sklearn.preprocessing import scale from sklearn.model_selection import GridSearchCV from sklearn.neural_network import MLPRegressor from", "col_type = col_types[col_name] row[col_name] = col_type(row[col_name]) # cast to correct type if col_name", "train_y, test_y = train_test_split(X, y, random_state=9) time params = { 'activation' : ['identity',", "return X, y, featnames if __name__ == '__main__': data_folder = '../data' parser =", "import GridSearchCV from sklearn.neural_network import MLPRegressor from sklearn.model_selection import train_test_split from sklearn.feature_extraction import", "def load_iwpc(data_folder): datafile = '{}/iwpc-scaled.csv'.format(data_folder) col_types = {'race': str, 'age': float, 'height': float,", "csv.DictReader(csvfile) for row in reader: for col_name in reader.fieldnames: col_type = col_types[col_name] row[col_name]", "'age': float, 'height': float, 'weight': float, 'amiodarone': int, 'decr': int, 'cyp2c9': str, 'vkorc1':", "args.data if dataset == 'eyedata': X, y, featnames = load_eyedata(data_folder) if dataset ==", "scale(data) X, y = data[:, :-1], data[:, -1] featnames = np.array( list(map(lambda i:", "in reader: for col_name in reader.fieldnames: col_type = col_types[col_name] row[col_name] = col_type(row[col_name]) #", "del row[col_name] X.append(row) dv = DictVectorizer() X = dv.fit_transform(X) y = np.array(y) featnames", "dataset = args.data if dataset == 'eyedata': X, y, featnames = load_eyedata(data_folder) if", "featnames = np.array(dv.get_feature_names()) return X, y, featnames if __name__ == '__main__': data_folder =", ": ['lbfgs', 'sgd', 'adam'], 'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350] } mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params,", "col_types[col_name] row[col_name] = col_type(row[col_name]) # cast to correct type if col_name == 'dose':", "y, featnames if __name__ == '__main__': data_folder = '../data' parser = argparse.ArgumentParser() parser.add_argument('data',", "'sgd', 'adam'], 'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350] } mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5,", "== 'dose': y.append(row[col_name]) del row[col_name] X.append(row) dv = DictVectorizer() X = dv.fit_transform(X) y", "row in reader: for col_name in reader.fieldnames: col_type = col_types[col_name] row[col_name] = col_type(row[col_name])", "csv import time import numpy as np import argparse import warnings warnings.filterwarnings('ignore') from", "'{}/eyedata.csv'.format(data_folder) data = np.loadtxt(datafile, skiprows=1, delimiter=',') data = scale(data) X, y = data[:,", "datafile = '{}/iwpc-scaled.csv'.format(data_folder) col_types = {'race': str, 'age': float, 
'height': float, 'weight': float,", "verbose=1) mlp_clf_grid.fit(train_X,train_y) print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y)) print('Grid Search", "(50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350] } mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1) mlp_clf_grid.fit(train_X,train_y) print('Train Accuracy", "= np.array( list(map(lambda i: '{:03}'.format(i), range(X.shape[1])))) return X, y, featnames def load_iwpc(data_folder): datafile", "= DictVectorizer() X = dv.fit_transform(X) y = np.array(y) featnames = np.array(dv.get_feature_names()) return X,", "np.array( list(map(lambda i: '{:03}'.format(i), range(X.shape[1])))) return X, y, featnames def load_iwpc(data_folder): datafile =", "np.array(dv.get_feature_names()) return X, y, featnames if __name__ == '__main__': data_folder = '../data' parser", "to correct type if col_name == 'dose': y.append(row[col_name]) del row[col_name] X.append(row) dv =", "'tanh', 'relu'], 'solver' : ['lbfgs', 'sgd', 'adam'], 'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350] } mlp_clf_grid", "np import argparse import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import scale from sklearn.model_selection import", "= parser.parse_args() dataset = args.data if dataset == 'eyedata': X, y, featnames =", "train_test_split from sklearn.feature_extraction import DictVectorizer def load_eyedata(data_folder): datafile = '{}/eyedata.csv'.format(data_folder) data = np.loadtxt(datafile,", "if col_name == 'dose': y.append(row[col_name]) del row[col_name] X.append(row) dv = DictVectorizer() X =", "dataset == 'iwpc': X, y, featnames = load_iwpc(data_folder) train_X, test_X, train_y, test_y =", "open(datafile) as csvfile: reader = csv.DictReader(csvfile) for row in reader: for col_name in", "-1] featnames = np.array( list(map(lambda i: '{:03}'.format(i), range(X.shape[1])))) return X, y, featnames def", "csvfile: reader = csv.DictReader(csvfile) for row in reader: for col_name in reader.fieldnames: col_type", "reader.fieldnames: col_type = col_types[col_name] row[col_name] = col_type(row[col_name]) # cast to correct type if", "warnings.filterwarnings('ignore') from sklearn.preprocessing import scale from sklearn.model_selection import GridSearchCV from sklearn.neural_network import MLPRegressor", "correct type if col_name == 'dose': y.append(row[col_name]) del row[col_name] X.append(row) dv = DictVectorizer()", "random_state=9) time params = { 'activation' : ['identity', 'logistic', 'tanh', 'relu'], 'solver' :", "import time import numpy as np import argparse import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing", "GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1) mlp_clf_grid.fit(train_X,train_y) print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test Accuracy :", "data to use') args = parser.parse_args() dataset = args.data if dataset == 'eyedata':", "from sklearn.feature_extraction import DictVectorizer def load_eyedata(data_folder): datafile = '{}/eyedata.csv'.format(data_folder) data = np.loadtxt(datafile, skiprows=1,", "help='Specify the data to use') args = parser.parse_args() dataset = args.data if dataset", "= [], [] with open(datafile) as csvfile: reader = 
csv.DictReader(csvfile) for row in", "data[:, :-1], data[:, -1] featnames = np.array( list(map(lambda i: '{:03}'.format(i), range(X.shape[1])))) return X,", ": ['identity', 'logistic', 'tanh', 'relu'], 'solver' : ['lbfgs', 'sgd', 'adam'], 'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)],", "y = data[:, :-1], data[:, -1] featnames = np.array( list(map(lambda i: '{:03}'.format(i), range(X.shape[1]))))", "cast to correct type if col_name == 'dose': y.append(row[col_name]) del row[col_name] X.append(row) dv", "= scale(data) X, y = data[:, :-1], data[:, -1] featnames = np.array( list(map(lambda", "col_types = {'race': str, 'age': float, 'height': float, 'weight': float, 'amiodarone': int, 'decr':", "warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import scale from sklearn.model_selection import GridSearchCV from sklearn.neural_network import", "type if col_name == 'dose': y.append(row[col_name]) del row[col_name] X.append(row) dv = DictVectorizer() X", "X, y, featnames = load_iwpc(data_folder) train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=9)", "data[:, -1] featnames = np.array( list(map(lambda i: '{:03}'.format(i), range(X.shape[1])))) return X, y, featnames", "'{}/iwpc-scaled.csv'.format(data_folder) col_types = {'race': str, 'age': float, 'height': float, 'weight': float, 'amiodarone': int,", "numpy as np import argparse import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import scale from", "'{:03}'.format(i), range(X.shape[1])))) return X, y, featnames def load_iwpc(data_folder): datafile = '{}/iwpc-scaled.csv'.format(data_folder) col_types =", "= load_iwpc(data_folder) train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=9) time params =", "{'race': str, 'age': float, 'height': float, 'weight': float, 'amiodarone': int, 'decr': int, 'cyp2c9':", "GridSearchCV from sklearn.neural_network import MLPRegressor from sklearn.model_selection import train_test_split from sklearn.feature_extraction import DictVectorizer", "dv = DictVectorizer() X = dv.fit_transform(X) y = np.array(y) featnames = np.array(dv.get_feature_names()) return", "int, 'cyp2c9': str, 'vkorc1': str, 'dose': float} X, y = [], [] with", "return X, y, featnames def load_iwpc(data_folder): datafile = '{}/iwpc-scaled.csv'.format(data_folder) col_types = {'race': str,", "X, y, featnames = load_eyedata(data_folder) if dataset == 'iwpc': X, y, featnames =", "list(map(lambda i: '{:03}'.format(i), range(X.shape[1])))) return X, y, featnames def load_iwpc(data_folder): datafile = '{}/iwpc-scaled.csv'.format(data_folder)", "range(X.shape[1])))) return X, y, featnames def load_iwpc(data_folder): datafile = '{}/iwpc-scaled.csv'.format(data_folder) col_types = {'race':", "time params = { 'activation' : ['identity', 'logistic', 'tanh', 'relu'], 'solver' : ['lbfgs',", "'height': float, 'weight': float, 'amiodarone': int, 'decr': int, 'cyp2c9': str, 'vkorc1': str, 'dose':", "parser = argparse.ArgumentParser() parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify the data to use') args =", "X, y, featnames def load_iwpc(data_folder): datafile = '{}/iwpc-scaled.csv'.format(data_folder) col_types = {'race': str, 'age':", "= load_eyedata(data_folder) if dataset == 'iwpc': X, y, featnames = load_iwpc(data_folder) train_X, test_X,", "} mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1) mlp_clf_grid.fit(train_X,train_y) print('Train 
Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y))", "as csvfile: reader = csv.DictReader(csvfile) for row in reader: for col_name in reader.fieldnames:", "train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=9) time params = { 'activation'", "time import numpy as np import argparse import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import", "row[col_name] = col_type(row[col_name]) # cast to correct type if col_name == 'dose': y.append(row[col_name])", "',mlp_clf_grid.best_estimator_.score(test_X, test_y)) print('Grid Search Best Accuracy :',mlp_clf_grid.best_score_) print('Best Parameters : ',mlp_clf_grid.best_params_) print('Best Estimators:", "if __name__ == '__main__': data_folder = '../data' parser = argparse.ArgumentParser() parser.add_argument('data', choices=['eyedata', 'iwpc'],", "# cast to correct type if col_name == 'dose': y.append(row[col_name]) del row[col_name] X.append(row)", "train_test_split(X, y, random_state=9) time params = { 'activation' : ['identity', 'logistic', 'tanh', 'relu'],", "str, 'vkorc1': str, 'dose': float} X, y = [], [] with open(datafile) as", "datafile = '{}/eyedata.csv'.format(data_folder) data = np.loadtxt(datafile, skiprows=1, delimiter=',') data = scale(data) X, y", "import MLPRegressor from sklearn.model_selection import train_test_split from sklearn.feature_extraction import DictVectorizer def load_eyedata(data_folder): datafile", "['identity', 'logistic', 'tanh', 'relu'], 'solver' : ['lbfgs', 'sgd', 'adam'], 'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350]", "params = { 'activation' : ['identity', 'logistic', 'tanh', 'relu'], 'solver' : ['lbfgs', 'sgd',", "args = parser.parse_args() dataset = args.data if dataset == 'eyedata': X, y, featnames", "MLPRegressor from sklearn.model_selection import train_test_split from sklearn.feature_extraction import DictVectorizer def load_eyedata(data_folder): datafile =", "reader = csv.DictReader(csvfile) for row in reader: for col_name in reader.fieldnames: col_type =", "argparse import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import scale from sklearn.model_selection import GridSearchCV from", "= np.loadtxt(datafile, skiprows=1, delimiter=',') data = scale(data) X, y = data[:, :-1], data[:,", "choices=['eyedata', 'iwpc'], help='Specify the data to use') args = parser.parse_args() dataset = args.data", "col_type(row[col_name]) # cast to correct type if col_name == 'dose': y.append(row[col_name]) del row[col_name]", "'iwpc'], help='Specify the data to use') args = parser.parse_args() dataset = args.data if", "np.loadtxt(datafile, skiprows=1, delimiter=',') data = scale(data) X, y = data[:, :-1], data[:, -1]", "= {'race': str, 'age': float, 'height': float, 'weight': float, 'amiodarone': int, 'decr': int,", "use') args = parser.parse_args() dataset = args.data if dataset == 'eyedata': X, y,", "def load_eyedata(data_folder): datafile = '{}/eyedata.csv'.format(data_folder) data = np.loadtxt(datafile, skiprows=1, delimiter=',') data = scale(data)", "n_jobs=-1, cv=5, verbose=1) mlp_clf_grid.fit(train_X,train_y) print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y))", "'__main__': data_folder = '../data' parser = argparse.ArgumentParser() parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify the data", "DictVectorizer def 
load_eyedata(data_folder): datafile = '{}/eyedata.csv'.format(data_folder) data = np.loadtxt(datafile, skiprows=1, delimiter=',') data =", "col_name in reader.fieldnames: col_type = col_types[col_name] row[col_name] = col_type(row[col_name]) # cast to correct", "dataset == 'eyedata': X, y, featnames = load_eyedata(data_folder) if dataset == 'iwpc': X,", "import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import scale from sklearn.model_selection import GridSearchCV from sklearn.neural_network", "sklearn.feature_extraction import DictVectorizer def load_eyedata(data_folder): datafile = '{}/eyedata.csv'.format(data_folder) data = np.loadtxt(datafile, skiprows=1, delimiter=',')", "'weight': float, 'amiodarone': int, 'decr': int, 'cyp2c9': str, 'vkorc1': str, 'dose': float} X,", "import scale from sklearn.model_selection import GridSearchCV from sklearn.neural_network import MLPRegressor from sklearn.model_selection import", "X.append(row) dv = DictVectorizer() X = dv.fit_transform(X) y = np.array(y) featnames = np.array(dv.get_feature_names())", "== 'iwpc': X, y, featnames = load_iwpc(data_folder) train_X, test_X, train_y, test_y = train_test_split(X,", "float, 'amiodarone': int, 'decr': int, 'cyp2c9': str, 'vkorc1': str, 'dose': float} X, y", "test_X, train_y, test_y = train_test_split(X, y, random_state=9) time params = { 'activation' :", "X, y = [], [] with open(datafile) as csvfile: reader = csv.DictReader(csvfile) for", "y, featnames def load_iwpc(data_folder): datafile = '{}/iwpc-scaled.csv'.format(data_folder) col_types = {'race': str, 'age': float,", "sklearn.preprocessing import scale from sklearn.model_selection import GridSearchCV from sklearn.neural_network import MLPRegressor from sklearn.model_selection", "X, y = data[:, :-1], data[:, -1] featnames = np.array( list(map(lambda i: '{:03}'.format(i),", "the data to use') args = parser.parse_args() dataset = args.data if dataset ==", "= argparse.ArgumentParser() parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify the data to use') args = parser.parse_args()", "test_y = train_test_split(X, y, random_state=9) time params = { 'activation' : ['identity', 'logistic',", "= col_type(row[col_name]) # cast to correct type if col_name == 'dose': y.append(row[col_name]) del", "for row in reader: for col_name in reader.fieldnames: col_type = col_types[col_name] row[col_name] =", "np.array(y) featnames = np.array(dv.get_feature_names()) return X, y, featnames if __name__ == '__main__': data_folder", "'activation' : ['identity', 'logistic', 'tanh', 'relu'], 'solver' : ['lbfgs', 'sgd', 'adam'], 'hidden_layer_sizes': [(100,),(150,),(200,),(300,),", "'amiodarone': int, 'decr': int, 'cyp2c9': str, 'vkorc1': str, 'dose': float} X, y =", "data = scale(data) X, y = data[:, :-1], data[:, -1] featnames = np.array(", "'eyedata': X, y, featnames = load_eyedata(data_folder) if dataset == 'iwpc': X, y, featnames", "import argparse import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import scale from sklearn.model_selection import GridSearchCV", "featnames = load_eyedata(data_folder) if dataset == 'iwpc': X, y, featnames = load_iwpc(data_folder) train_X,", "[(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350] } mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1) mlp_clf_grid.fit(train_X,train_y) print('Train", "float, 'weight': float, 'amiodarone': int, 'decr': int, 'cyp2c9': 
str, 'vkorc1': str, 'dose': float}", "featnames def load_iwpc(data_folder): datafile = '{}/iwpc-scaled.csv'.format(data_folder) col_types = {'race': str, 'age': float, 'height':", "mlp_clf_grid.fit(train_X,train_y) print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y)) print('Grid Search Best", "= col_types[col_name] row[col_name] = col_type(row[col_name]) # cast to correct type if col_name ==", "'decr': int, 'cyp2c9': str, 'vkorc1': str, 'dose': float} X, y = [], []", "X = dv.fit_transform(X) y = np.array(y) featnames = np.array(dv.get_feature_names()) return X, y, featnames", "in reader.fieldnames: col_type = col_types[col_name] row[col_name] = col_type(row[col_name]) # cast to correct type", "parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify the data to use') args = parser.parse_args() dataset =", "DictVectorizer() X = dv.fit_transform(X) y = np.array(y) featnames = np.array(dv.get_feature_names()) return X, y,", "load_iwpc(data_folder): datafile = '{}/iwpc-scaled.csv'.format(data_folder) col_types = {'race': str, 'age': float, 'height': float, 'weight':", ": ',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y)) print('Grid Search Best Accuracy :',mlp_clf_grid.best_score_) print('Best", "y, featnames = load_iwpc(data_folder) train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=9) time", "sklearn.model_selection import train_test_split from sklearn.feature_extraction import DictVectorizer def load_eyedata(data_folder): datafile = '{}/eyedata.csv'.format(data_folder) data", "y, random_state=9) time params = { 'activation' : ['identity', 'logistic', 'tanh', 'relu'], 'solver'", "from sklearn.model_selection import train_test_split from sklearn.feature_extraction import DictVectorizer def load_eyedata(data_folder): datafile = '{}/eyedata.csv'.format(data_folder)", "col_name == 'dose': y.append(row[col_name]) del row[col_name] X.append(row) dv = DictVectorizer() X = dv.fit_transform(X)", "print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y)) print('Grid Search Best Accuracy", "'logistic', 'tanh', 'relu'], 'solver' : ['lbfgs', 'sgd', 'adam'], 'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350] }", "X, y, featnames if __name__ == '__main__': data_folder = '../data' parser = argparse.ArgumentParser()", "'adam'], 'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350] } mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1)", "to use') args = parser.parse_args() dataset = args.data if dataset == 'eyedata': X,", "import csv import time import numpy as np import argparse import warnings warnings.filterwarnings('ignore')", "= args.data if dataset == 'eyedata': X, y, featnames = load_eyedata(data_folder) if dataset", "reader: for col_name in reader.fieldnames: col_type = col_types[col_name] row[col_name] = col_type(row[col_name]) # cast", "y, featnames = load_eyedata(data_folder) if dataset == 'iwpc': X, y, featnames = load_iwpc(data_folder)", "scale from sklearn.model_selection import GridSearchCV from sklearn.neural_network import MLPRegressor from 
sklearn.model_selection import train_test_split", "= dv.fit_transform(X) y = np.array(y) featnames = np.array(dv.get_feature_names()) return X, y, featnames if", "cv=5, verbose=1) mlp_clf_grid.fit(train_X,train_y) print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y)) print('Grid", "sklearn.model_selection import GridSearchCV from sklearn.neural_network import MLPRegressor from sklearn.model_selection import train_test_split from sklearn.feature_extraction", "= data[:, :-1], data[:, -1] featnames = np.array( list(map(lambda i: '{:03}'.format(i), range(X.shape[1])))) return", "int, 'decr': int, 'cyp2c9': str, 'vkorc1': str, 'dose': float} X, y = [],", "'vkorc1': str, 'dose': float} X, y = [], [] with open(datafile) as csvfile:", "featnames = load_iwpc(data_folder) train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=9) time params", "import numpy as np import argparse import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import scale", "'iwpc': X, y, featnames = load_iwpc(data_folder) train_X, test_X, train_y, test_y = train_test_split(X, y,", "'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350] } mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1) mlp_clf_grid.fit(train_X,train_y)", "with open(datafile) as csvfile: reader = csv.DictReader(csvfile) for row in reader: for col_name", "mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1) mlp_clf_grid.fit(train_X,train_y) print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test", "= np.array(dv.get_feature_names()) return X, y, featnames if __name__ == '__main__': data_folder = '../data'", "= np.array(y) featnames = np.array(dv.get_feature_names()) return X, y, featnames if __name__ == '__main__':", "load_eyedata(data_folder) if dataset == 'iwpc': X, y, featnames = load_iwpc(data_folder) train_X, test_X, train_y,", "skiprows=1, delimiter=',') data = scale(data) X, y = data[:, :-1], data[:, -1] featnames", "= GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1) mlp_clf_grid.fit(train_X,train_y) print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test Accuracy", "y = [], [] with open(datafile) as csvfile: reader = csv.DictReader(csvfile) for row", "'relu'], 'solver' : ['lbfgs', 'sgd', 'adam'], 'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350] } mlp_clf_grid =", "import DictVectorizer def load_eyedata(data_folder): datafile = '{}/eyedata.csv'.format(data_folder) data = np.loadtxt(datafile, skiprows=1, delimiter=',') data", "== '__main__': data_folder = '../data' parser = argparse.ArgumentParser() parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify the", "',mlp_clf_grid.best_estimator_.score(train_X,train_y)) print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y)) print('Grid Search Best Accuracy :',mlp_clf_grid.best_score_) print('Best Parameters", "argparse.ArgumentParser() parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify the data to use') args = parser.parse_args() dataset", "'solver' : ['lbfgs', 'sgd', 'adam'], 'hidden_layer_sizes': [(100,),(150,),(200,),(300,), 
(50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)], 'max_iter':[200,250,300,350] } mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9),", "{ 'activation' : ['identity', 'logistic', 'tanh', 'relu'], 'solver' : ['lbfgs', 'sgd', 'adam'], 'hidden_layer_sizes':", ":-1], data[:, -1] featnames = np.array( list(map(lambda i: '{:03}'.format(i), range(X.shape[1])))) return X, y,", "'dose': float} X, y = [], [] with open(datafile) as csvfile: reader =", "[] with open(datafile) as csvfile: reader = csv.DictReader(csvfile) for row in reader: for", "str, 'dose': float} X, y = [], [] with open(datafile) as csvfile: reader", "parser.parse_args() dataset = args.data if dataset == 'eyedata': X, y, featnames = load_eyedata(data_folder)", "if dataset == 'iwpc': X, y, featnames = load_iwpc(data_folder) train_X, test_X, train_y, test_y", "load_eyedata(data_folder): datafile = '{}/eyedata.csv'.format(data_folder) data = np.loadtxt(datafile, skiprows=1, delimiter=',') data = scale(data) X,", "test_y)) print('Grid Search Best Accuracy :',mlp_clf_grid.best_score_) print('Best Parameters : ',mlp_clf_grid.best_params_) print('Best Estimators: ',mlp_clf_grid.best_estimator_)", "= '{}/eyedata.csv'.format(data_folder) data = np.loadtxt(datafile, skiprows=1, delimiter=',') data = scale(data) X, y =", "= '{}/iwpc-scaled.csv'.format(data_folder) col_types = {'race': str, 'age': float, 'height': float, 'weight': float, 'amiodarone':", "= train_test_split(X, y, random_state=9) time params = { 'activation' : ['identity', 'logistic', 'tanh',", "data_folder = '../data' parser = argparse.ArgumentParser() parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify the data to", "delimiter=',') data = scale(data) X, y = data[:, :-1], data[:, -1] featnames =", "[], [] with open(datafile) as csvfile: reader = csv.DictReader(csvfile) for row in reader:", "= csv.DictReader(csvfile) for row in reader: for col_name in reader.fieldnames: col_type = col_types[col_name]", "y.append(row[col_name]) del row[col_name] X.append(row) dv = DictVectorizer() X = dv.fit_transform(X) y = np.array(y)", "'../data' parser = argparse.ArgumentParser() parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify the data to use') args", "= { 'activation' : ['identity', 'logistic', 'tanh', 'relu'], 'solver' : ['lbfgs', 'sgd', 'adam'],", "as np import argparse import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import scale from sklearn.model_selection" ]
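For a quick smoke test of the same grid-search pattern without the eyedata/IWPC CSV files, here is a self-contained sketch on synthetic data, with a tiny grid so it finishes quickly. Note that `score` on `MLPRegressor` is the R-squared coefficient, not a classification accuracy, despite the 'Accuracy' labels printed above:

from sklearn.datasets import make_regression
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.neural_network import MLPRegressor

# Synthetic regression problem standing in for the real datasets.
X, y = make_regression(n_samples=200, n_features=10, noise=0.1, random_state=9)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=9)

params = {'hidden_layer_sizes': [(50,), (100,)], 'max_iter': [200]}
grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, cv=3)
grid.fit(train_X, train_y)
print('Test R^2:', grid.best_estimator_.score(test_X, test_y))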
[ "batch in tqdm(dataloader): batch = batch.to('cpu') wavs, lens = batch.sig wavs_aug_tot = []", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "permissions and # limitations under the License. # ============================================================================ \"\"\" prepare train data", "speechbrain.utils.data_utils import download_file from speechbrain.utils.distributed import run_on_main from speechbrain.dataio.sampler import ReproducibleRandomSampler from src.voxceleb_prepare", "audio_pipeline(wav, start, stop, duration): if params[\"random_chunk\"]: duration_sample = int(duration * params[\"sample_rate\"]) start =", "= batch.sig wavs_aug_tot = [] wavs_aug_tot.append(wavs) for count, augment in enumerate(hparams[\"augment_pipeline\"]): # Apply", "0 : wavs.shape[1]] else: zero_sig = torch.zeros_like(wavs) zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug", "None if loader_kwargs.get(\"shuffle\", False) is True: sampler = ReproducibleRandomSampler(train_data) loader_kwargs[\"sampler\"] = sampler del", "for epoch in range(hparams[\"number_of_epochs\"]): sampler.set_epoch(epoch) cnt = 0 for batch in tqdm(dataloader): batch", "lens = batch.sig wavs_aug_tot = [] wavs_aug_tot.append(wavs) for count, augment in enumerate(hparams[\"augment_pipeline\"]): #", "feats = hparams[\"compute_features\"](wavs) feats = hparams[\"mean_var_norm\"](feats, lens) ct = datetime.now() ts = ct.timestamp()", "int(stop) num_frames = stop - start sig, _ = torchaudio.load( wav, num_frames=num_frames, frame_offset=start", "os.path.basename(hparams[\"verification_file\"]) ) download_file(hparams[\"verification_file\"], veri_file_path) print(\"data_prep\") # Dataset prep (parsing VoxCeleb and annotation into", "with command-line overrides with open(hparams_file) as fin: hparams = load_hyperpyyaml(fin, overrides) print(\"download verification", ": wavs_aug.shape[1]] = wavs_aug wavs_aug = zero_sig if hparams[\"concat_augment\"]: wavs_aug_tot.append(wavs_aug) else: wavs =", "_ = dataio_prep(hparams) print(\"len of train:\", len(train_data)) loader_kwargs = hparams[\"dataloader_options\"] sampler = None", "stop, duration): if params[\"random_chunk\"]: duration_sample = int(duration * params[\"sample_rate\"]) start = random.randint(0, duration_sample", "if not os.path.exists(os.path.join(hparams[\"feat_folder\"])): os.makedirs(os.path.join(hparams[\"feat_folder\"]), exist_ok=False) save_dir = os.path.join(hparams[\"feat_folder\"]) # Dataset IO prep: creating", "2. Define audio pipeline: @sb.utils.data_pipeline.takes(\"wav\", \"start\", \"stop\", \"duration\") @sb.utils.data_pipeline.provides(\"sig\") def audio_pipeline(wav, start, stop,", "this file except in compliance with the License. 
# You may obtain a", "datetime import datetime import numpy as np from tqdm import tqdm import torch", "src.voxceleb_prepare import prepare_voxceleb def dataio_prep(params): \"Creates the datasets and their data processing pipelines.\"", "wavs = torch.cat(wavs_aug_tot, dim=0) n_augment = len(wavs_aug_tot) lens = torch.cat([lens] * n_augment) #", "pipeline: @sb.utils.data_pipeline.takes(\"wav\", \"start\", \"stop\", \"duration\") @sb.utils.data_pipeline.provides(\"sig\") def audio_pipeline(wav, start, stop, duration): if params[\"random_chunk\"]:", "encoder (with multi-GPU DDP support) lab_enc_file = os.path.join(hparams[\"save_folder\"], \"label_encoder.txt\") label_encoder.load_or_create( path=lab_enc_file, from_didatasets=[train_data_reader], output_key=\"spk_id\",", "verification sentences from train) veri_file_path = os.path.join( hparams[\"save_folder\"], os.path.basename(hparams[\"verification_file\"]) ) download_file(hparams[\"verification_file\"], veri_file_path) print(\"data_prep\")", "\"__main__\": hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) print('parse parameters done') print(\"start load hyper param\")", "wavs_aug_tot = [] wavs_aug_tot.append(wavs) for count, augment in enumerate(hparams[\"augment_pipeline\"]): # Apply augment wavs_aug", "wavs_aug_tot[0] = wavs wavs = torch.cat(wavs_aug_tot, dim=0) n_augment = len(wavs_aug_tot) lens = torch.cat([lens]", "spk_id_encoded = label_encoder.encode_sequence_torch([spk_id]) yield spk_id_encoded sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline) # 3. Fit encoder: # Load", "= torch.cat(wavs_aug_tot, dim=0) n_augment = len(wavs_aug_tot) lens = torch.cat([lens] * n_augment) # Feature", "Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0", "ANY KIND, either express or implied. # See the License for the specific", "= label_encoder.encode_sequence_torch([spk_id]) yield spk_id_encoded sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline) # 3. Fit encoder: # Load or", "os import sys import random from datetime import datetime import numpy as np", "overrides with open(hparams_file) as fin: hparams = load_hyperpyyaml(fin, overrides) print(\"download verification file\") #", "start, stop, duration): if params[\"random_chunk\"]: duration_sample = int(duration * params[\"sample_rate\"]) start = random.randint(0,", "not os.path.exists(os.path.join(hparams[\"feat_folder\"])): os.makedirs(os.path.join(hparams[\"feat_folder\"]), exist_ok=False) save_dir = os.path.join(hparams[\"feat_folder\"]) # Dataset IO prep: creating Dataset", "**loader_kwargs ) fea_fp = open(os.path.join(save_dir, \"fea.lst\"), 'w') label_fp = open(os.path.join(save_dir, \"label.lst\"), 'w') for", "np from tqdm import tqdm import torch import torchaudio from hyperpyyaml import load_hyperpyyaml", "and proper encodings for phones train_data, valid_data, _ = dataio_prep(hparams) print(\"len of train:\",", ") sig = sig.transpose(0, 1).squeeze(1) return sig sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) # 3. 
Define text", "numpy as np from tqdm import tqdm import torch import torchaudio from hyperpyyaml", "else: wavs = wavs_aug wavs_aug_tot[0] = wavs wavs = torch.cat(wavs_aug_tot, dim=0) n_augment =", "label_encoder if __name__ == \"__main__\": hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) print('parse parameters done')", "or compute the label encoder (with multi-GPU DDP support) lab_enc_file = os.path.join(hparams[\"save_folder\"], \"label_encoder.txt\")", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "1) stop = start + snt_len_sample else: start = int(start) stop = int(stop)", "wavs_aug wavs_aug = zero_sig if hparams[\"concat_augment\"]: wavs_aug_tot.append(wavs_aug) else: wavs = wavs_aug wavs_aug_tot[0] =", "zero_sig if hparams[\"concat_augment\"]: wavs_aug_tot.append(wavs_aug) else: wavs = wavs_aug wavs_aug_tot[0] = wavs wavs =", "import load_hyperpyyaml import speechbrain as sb from speechbrain.utils.data_utils import download_file from speechbrain.utils.distributed import", "= datetime.now() ts = ct.timestamp() id_save_name = str(ts) + \"_id.npy\" fea_save_name = str(ts)", "valid_data_reader] label_encoder = sb.dataio.encoder.CategoricalEncoder() snt_len_sample = int(params[\"sample_rate\"] * params[\"sentence_len\"]) # 2. Define audio", "run_opts, overrides = sb.parse_arguments(sys.argv[1:]) print('parse parameters done') print(\"start load hyper param\") # Load", "\"label_encoder.txt\") label_encoder.load_or_create( path=lab_enc_file, from_didatasets=[train_data_reader], output_key=\"spk_id\", ) # 4. Set output: sb.dataio.dataset.set_output_keys(datasets, [\"id\", \"sig\",", "from speechbrain.utils.data_utils import download_file from speechbrain.utils.distributed import run_on_main from speechbrain.dataio.sampler import ReproducibleRandomSampler from", "# Copyright 2022 Huawei Technologies Co., Ltd # # Licensed under the Apache", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "Dataset prep (parsing VoxCeleb and annotation into csv files) run_on_main( prepare_voxceleb, kwargs={ \"data_folder\":", "@sb.utils.data_pipeline.takes(\"wav\", \"start\", \"stop\", \"duration\") @sb.utils.data_pipeline.provides(\"sig\") def audio_pipeline(wav, start, stop, duration): if params[\"random_chunk\"]: duration_sample", "# 4. Set output: sb.dataio.dataset.set_output_keys(datasets, [\"id\", \"sig\", \"spk_id_encoded\"]) return train_data_reader, valid_data_reader, label_encoder if", "= wavs_aug[:, 0 : wavs.shape[1]] else: zero_sig = torch.zeros_like(wavs) zero_sig[:, 0 : wavs_aug.shape[1]]", "Download verification list (to exclude verification sentences from train) veri_file_path = os.path.join( hparams[\"save_folder\"],", "from datetime import datetime import numpy as np from tqdm import tqdm import", "@sb.utils.data_pipeline.provides(\"sig\") def audio_pipeline(wav, start, stop, duration): if params[\"random_chunk\"]: duration_sample = int(duration * params[\"sample_rate\"])", "OF ANY KIND, either express or implied. # See the License for the", "lens = torch.cat([lens] * n_augment) # Feature extraction and normalization feats = hparams[\"compute_features\"](wavs)", "# limitations under the License. 
# ============================================================================ \"\"\" prepare train data \"\"\" import", "= open(os.path.join(save_dir, \"label.lst\"), 'w') for epoch in range(hparams[\"number_of_epochs\"]): sampler.set_epoch(epoch) cnt = 0 for", "Copyright 2022 Huawei Technologies Co., Ltd # # Licensed under the Apache License,", "encodings for phones train_data, valid_data, _ = dataio_prep(hparams) print(\"len of train:\", len(train_data)) loader_kwargs", "torchaudio.load( wav, num_frames=num_frames, frame_offset=start ) sig = sig.transpose(0, 1).squeeze(1) return sig sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)", "yield spk_id_encoded sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline) # 3. Fit encoder: # Load or compute the", "IO prep: creating Dataset objects and proper encodings for phones train_data, valid_data, _", "len(wavs_aug_tot) lens = torch.cat([lens] * n_augment) # Feature extraction and normalization feats =", "= sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=params[\"valid_annotation\"], replacements={\"data_root\": data_folder}, ) datasets = [train_data_reader, valid_data_reader] label_encoder = sb.dataio.encoder.CategoricalEncoder()", "= augment(wavs, lens) # Managing speed change if wavs_aug.shape[1] > wavs.shape[1]: wavs_aug =", "label_encoder = sb.dataio.encoder.CategoricalEncoder() snt_len_sample = int(params[\"sample_rate\"] * params[\"sentence_len\"]) # 2. Define audio pipeline:", "ts = ct.timestamp() id_save_name = str(ts) + \"_id.npy\" fea_save_name = str(ts) + \"_fea.npy\"", "wavs_aug[:, 0 : wavs.shape[1]] else: zero_sig = torch.zeros_like(wavs) zero_sig[:, 0 : wavs_aug.shape[1]] =", "processing pipelines.\" data_folder = params[\"data_folder\"] # 1. Declarations: train_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=params[\"train_annotation\"], replacements={\"data_root\":", "VoxCeleb and annotation into csv files) run_on_main( prepare_voxceleb, kwargs={ \"data_folder\": hparams[\"data_folder\"], \"save_folder\": hparams[\"save_folder\"],", "in range(hparams[\"number_of_epochs\"]): sampler.set_epoch(epoch) cnt = 0 for batch in tqdm(dataloader): batch = batch.to('cpu')", "\"Creates the datasets and their data processing pipelines.\" data_folder = params[\"data_folder\"] # 1.", "return sig sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) # 3. Define text pipeline: @sb.utils.data_pipeline.takes(\"spk_id\") @sb.utils.data_pipeline.provides(\"spk_id\", \"spk_id_encoded\") def", "if loader_kwargs.get(\"shuffle\", False) is True: sampler = ReproducibleRandomSampler(train_data) loader_kwargs[\"sampler\"] = sampler del loader_kwargs[\"shuffle\"]", "open(hparams_file) as fin: hparams = load_hyperpyyaml(fin, overrides) print(\"download verification file\") # Download verification", "= sb.dataio.dataloader.make_dataloader( train_data, **loader_kwargs ) fea_fp = open(os.path.join(save_dir, \"fea.lst\"), 'w') label_fp = open(os.path.join(save_dir,", ") # 4. 
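# Typical invocation (the script and YAML names here are hypothetical; pass
# your recipe's hyperparameter file plus any overrides, as sb.parse_arguments
# expects in the __main__ block below):
#
#   python prepare_data.py hparams/prepare_voxceleb.yaml --data_folder=/path/to/VoxCeleb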
def dataio_prep(params):
    """Creates the datasets and their data processing pipelines."""
    data_folder = params["data_folder"]

    # 1. Declarations:
    train_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=params["train_annotation"],
        replacements={"data_root": data_folder},
    )

    valid_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=params["valid_annotation"],
        replacements={"data_root": data_folder},
    )

    datasets = [train_data_reader, valid_data_reader]
    label_encoder = sb.dataio.encoder.CategoricalEncoder()

    snt_len_sample = int(params["sample_rate"] * params["sentence_len"])

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "start", "stop", "duration")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, start, stop, duration):
        # Either crop a random chunk of sentence_len seconds, or use the
        # annotated [start, stop) range as-is.
        if params["random_chunk"]:
            duration_sample = int(duration * params["sample_rate"])
            start = random.randint(0, duration_sample - snt_len_sample - 1)
            stop = start + snt_len_sample
        else:
            start = int(start)
            stop = int(stop)
        num_frames = stop - start
        sig, _ = torchaudio.load(
            wav, num_frames=num_frames, frame_offset=start
        )
        sig = sig.transpose(0, 1).squeeze(1)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("spk_id")
    @sb.utils.data_pipeline.provides("spk_id", "spk_id_encoded")
    def label_pipeline(spk_id):
        yield spk_id
        spk_id_encoded = label_encoder.encode_sequence_torch([spk_id])
        yield spk_id_encoded

    sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline)

    # 4. Fit encoder:
    # Load or compute the label encoder (with multi-GPU DDP support).
    # Note: use `params`, not the global `hparams`, so the function stays
    # self-contained.
    lab_enc_file = os.path.join(params["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[train_data_reader],
        output_key="spk_id",
    )

    # 5. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "spk_id_encoded"])

    return train_data_reader, valid_data_reader, label_encoder
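# A minimal read-back sketch (not part of the original recipe): it assumes
# only what the dump loop below writes, i.e. "fea.lst"/"label.lst" listing
# matching "<timestamp>_fea.npy"/"<timestamp>_id.npy" files in save_dir.
def load_dumped_batches(save_dir):
    """Yield (features, speaker_ids) numpy array pairs dumped by this script."""
    with open(os.path.join(save_dir, "fea.lst")) as fea_lst, \
            open(os.path.join(save_dir, "label.lst")) as label_lst:
        for fea_name, id_name in zip(fea_lst, label_lst):
            feats = np.load(os.path.join(save_dir, fea_name.strip()))
            spkid = np.load(os.path.join(save_dir, id_name.strip()))
            yield feats, spkid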
if __name__ == "__main__":
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    print("parse parameters done")

    print("start load hyper param")
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    print("download verification file")
    # Download verification list (to exclude verification sentences from train)
    veri_file_path = os.path.join(
        hparams["save_folder"], os.path.basename(hparams["verification_file"])
    )
    download_file(hparams["verification_file"], veri_file_path)

    print("data_prep")
    # Dataset prep (parsing VoxCeleb and annotation into csv files)
    run_on_main(
        prepare_voxceleb,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "verification_pairs_file": veri_file_path,
            "splits": ("train", "dev"),
            "split_ratio": (90, 10),
            "seg_dur": hparams["sentence_len"],
            "skip_prep": hparams["skip_prep"],
        },
    )
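    # run_on_main executes prepare_voxceleb on the main process only, so the
    # csv annotation files are written once even under multi-GPU DDP.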
    print("data io prep")
    # os.makedirs with exist_ok=True replaces the original
    # exists-check-then-create pair; the behavior is the same.
    save_dir = hparams["feat_folder"]
    os.makedirs(save_dir, exist_ok=True)

    # Dataset IO prep: creating Dataset objects and proper encodings for the
    # speaker labels
    train_data, valid_data, _ = dataio_prep(hparams)
    print("len of train:", len(train_data))

    loader_kwargs = hparams["dataloader_options"]
    sampler = None
    if loader_kwargs.get("shuffle", False) is True:
        # Replace plain shuffling with a sampler that can be re-seeded
        # deterministically per epoch via set_epoch().
        sampler = ReproducibleRandomSampler(train_data)
        loader_kwargs["sampler"] = sampler
        del loader_kwargs["shuffle"]
    dataloader = sb.dataio.dataloader.make_dataloader(
        train_data, **loader_kwargs
    )

    fea_fp = open(os.path.join(save_dir, "fea.lst"), "w")
    label_fp = open(os.path.join(save_dir, "label.lst"), "w")
    for epoch in range(hparams["number_of_epochs"]):
        if sampler is not None:  # guard: sampler is None when shuffle is off
            sampler.set_epoch(epoch)
        for batch in tqdm(dataloader):
            batch = batch.to("cpu")
            wavs, lens = batch.sig
            wavs_aug_tot = [wavs]
            for augment in hparams["augment_pipeline"]:
                # Apply augment
                wavs_aug = augment(wavs, lens)
                # Managing speed change: crop or zero-pad the augmented
                # signal back to the original length.
                if wavs_aug.shape[1] > wavs.shape[1]:
                    wavs_aug = wavs_aug[:, 0 : wavs.shape[1]]
                else:
                    zero_sig = torch.zeros_like(wavs)
                    zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug
                    wavs_aug = zero_sig
                if hparams["concat_augment"]:
                    wavs_aug_tot.append(wavs_aug)
                else:
                    wavs = wavs_aug
                    wavs_aug_tot[0] = wavs
            wavs = torch.cat(wavs_aug_tot, dim=0)
            n_augment = len(wavs_aug_tot)
            lens = torch.cat([lens] * n_augment)

            # Feature extraction and normalization
            feats = hparams["compute_features"](wavs)
            feats = hparams["mean_var_norm"](feats, lens)

            # A timestamp makes a unique file name for each dumped batch.
            ts = datetime.now().timestamp()
            id_save_name = str(ts) + "_id.npy"
            fea_save_name = str(ts) + "_fea.npy"
            # Repeat the speaker ids so they stay aligned with the augmented
            # copies of the waveforms.
            spkid = torch.cat([batch.spk_id_encoded.data] * n_augment, dim=0)
            np.save(os.path.join(save_dir, id_save_name), spkid.numpy())
            np.save(os.path.join(save_dir, fea_save_name), feats.numpy())
            label_fp.write(id_save_name + "\n")
            fea_fp.write(fea_save_name + "\n")
    label_fp.close()
    fea_fp.close()
Define audio pipeline: @sb.utils.data_pipeline.takes(\"wav\", \"start\", \"stop\", \"duration\") @sb.utils.data_pipeline.provides(\"sig\") def", "\"skip_prep\": hparams[\"skip_prep\"] }, ) print(\"data io prep\") if not os.path.exists(os.path.join(hparams[\"feat_folder\"])): os.makedirs(os.path.join(hparams[\"feat_folder\"]), exist_ok=False) save_dir", "\"data_folder\": hparams[\"data_folder\"], \"save_folder\": hparams[\"save_folder\"], \"verification_pairs_file\": veri_file_path, \"splits\": (\"train\", \"dev\"), \"split_ratio\": (90, 10), \"seg_dur\":", "batch.to('cpu') wavs, lens = batch.sig wavs_aug_tot = [] wavs_aug_tot.append(wavs) for count, augment in", "speed change if wavs_aug.shape[1] > wavs.shape[1]: wavs_aug = wavs_aug[:, 0 : wavs.shape[1]] else:", "Define audio pipeline: @sb.utils.data_pipeline.takes(\"wav\", \"start\", \"stop\", \"duration\") @sb.utils.data_pipeline.provides(\"sig\") def audio_pipeline(wav, start, stop, duration):", "valid_data, _ = dataio_prep(hparams) print(\"len of train:\", len(train_data)) loader_kwargs = hparams[\"dataloader_options\"] sampler =", "overrides) print(\"download verification file\") # Download verification list (to exclude verification sentences from", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "hparams[\"data_folder\"], \"save_folder\": hparams[\"save_folder\"], \"verification_pairs_file\": veri_file_path, \"splits\": (\"train\", \"dev\"), \"split_ratio\": (90, 10), \"seg_dur\": hparams[\"sentence_len\"],", "}, ) print(\"data io prep\") if not os.path.exists(os.path.join(hparams[\"feat_folder\"])): os.makedirs(os.path.join(hparams[\"feat_folder\"]), exist_ok=False) save_dir = os.path.join(hparams[\"feat_folder\"])", "multi-GPU DDP support) lab_enc_file = os.path.join(hparams[\"save_folder\"], \"label_encoder.txt\") label_encoder.load_or_create( path=lab_enc_file, from_didatasets=[train_data_reader], output_key=\"spk_id\", ) #", "import download_file from speechbrain.utils.distributed import run_on_main from speechbrain.dataio.sampler import ReproducibleRandomSampler from src.voxceleb_prepare import", "Declarations: train_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=params[\"train_annotation\"], replacements={\"data_root\": data_folder}, ) valid_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=params[\"valid_annotation\"], replacements={\"data_root\":", "prep (parsing VoxCeleb and annotation into csv files) run_on_main( prepare_voxceleb, kwargs={ \"data_folder\": hparams[\"data_folder\"],", "= wavs_aug wavs_aug_tot[0] = wavs wavs = torch.cat(wavs_aug_tot, dim=0) n_augment = len(wavs_aug_tot) lens", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "* n_augment) # Feature extraction and normalization feats = hparams[\"compute_features\"](wavs) feats = hparams[\"mean_var_norm\"](feats,", "print('parse parameters done') print(\"start load hyper param\") # Load hyperparameters file with command-line", "pipelines.\" data_folder = params[\"data_folder\"] # 1. 
Declarations: train_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=params[\"train_annotation\"], replacements={\"data_root\": data_folder},", "sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=params[\"train_annotation\"], replacements={\"data_root\": data_folder}, ) valid_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=params[\"valid_annotation\"], replacements={\"data_root\": data_folder}, ) datasets", "print(\"len of train:\", len(train_data)) loader_kwargs = hparams[\"dataloader_options\"] sampler = None if loader_kwargs.get(\"shuffle\", False)", "enumerate(hparams[\"augment_pipeline\"]): # Apply augment wavs_aug = augment(wavs, lens) # Managing speed change if", "frame_offset=start ) sig = sig.transpose(0, 1).squeeze(1) return sig sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) # 3. Define", "print(\"download verification file\") # Download verification list (to exclude verification sentences from train)", "feats = hparams[\"mean_var_norm\"](feats, lens) ct = datetime.now() ts = ct.timestamp() id_save_name = str(ts)", "= hparams[\"mean_var_norm\"](feats, lens) ct = datetime.now() ts = ct.timestamp() id_save_name = str(ts) +", "torch.cat([lens] * n_augment) # Feature extraction and normalization feats = hparams[\"compute_features\"](wavs) feats =", "= hparams[\"dataloader_options\"] sampler = None if loader_kwargs.get(\"shuffle\", False) is True: sampler = ReproducibleRandomSampler(train_data)", "the specific language governing permissions and # limitations under the License. # ============================================================================", "prepare_voxceleb, kwargs={ \"data_folder\": hparams[\"data_folder\"], \"save_folder\": hparams[\"save_folder\"], \"verification_pairs_file\": veri_file_path, \"splits\": (\"train\", \"dev\"), \"split_ratio\": (90,", "from src.voxceleb_prepare import prepare_voxceleb def dataio_prep(params): \"Creates the datasets and their data processing", "sampler.set_epoch(epoch) cnt = 0 for batch in tqdm(dataloader): batch = batch.to('cpu') wavs, lens", "sig, _ = torchaudio.load( wav, num_frames=num_frames, frame_offset=start ) sig = sig.transpose(0, 1).squeeze(1) return", "def label_pipeline(spk_id): yield spk_id spk_id_encoded = label_encoder.encode_sequence_torch([spk_id]) yield spk_id_encoded sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline) # 3.", "= os.path.join(hparams[\"save_folder\"], \"label_encoder.txt\") label_encoder.load_or_create( path=lab_enc_file, from_didatasets=[train_data_reader], output_key=\"spk_id\", ) # 4. Set output: sb.dataio.dataset.set_output_keys(datasets,", "done') print(\"start load hyper param\") # Load hyperparameters file with command-line overrides with", "phones train_data, valid_data, _ = dataio_prep(hparams) print(\"len of train:\", len(train_data)) loader_kwargs = hparams[\"dataloader_options\"]", "data_folder = params[\"data_folder\"] # 1. 
Declarations: train_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=params[\"train_annotation\"], replacements={\"data_root\": data_folder}, )", "label encoder (with multi-GPU DDP support) lab_enc_file = os.path.join(hparams[\"save_folder\"], \"label_encoder.txt\") label_encoder.load_or_create( path=lab_enc_file, from_didatasets=[train_data_reader],", "hparams[\"save_folder\"], \"verification_pairs_file\": veri_file_path, \"splits\": (\"train\", \"dev\"), \"split_ratio\": (90, 10), \"seg_dur\": hparams[\"sentence_len\"], \"skip_prep\": hparams[\"skip_prep\"]", "hparams = load_hyperpyyaml(fin, overrides) print(\"download verification file\") # Download verification list (to exclude", "Ltd # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "prepare_voxceleb def dataio_prep(params): \"Creates the datasets and their data processing pipelines.\" data_folder =", "# Apply augment wavs_aug = augment(wavs, lens) # Managing speed change if wavs_aug.shape[1]", "encoder: # Load or compute the label encoder (with multi-GPU DDP support) lab_enc_file", "param\") # Load hyperparameters file with command-line overrides with open(hparams_file) as fin: hparams", "= random.randint(0, duration_sample - snt_len_sample - 1) stop = start + snt_len_sample else:", "hparams[\"skip_prep\"] }, ) print(\"data io prep\") if not os.path.exists(os.path.join(hparams[\"feat_folder\"])): os.makedirs(os.path.join(hparams[\"feat_folder\"]), exist_ok=False) save_dir =", "duration_sample - snt_len_sample - 1) stop = start + snt_len_sample else: start =", "Load or compute the label encoder (with multi-GPU DDP support) lab_enc_file = os.path.join(hparams[\"save_folder\"],", "and their data processing pipelines.\" data_folder = params[\"data_folder\"] # 1. Declarations: train_data_reader =", "governing permissions and # limitations under the License. # ============================================================================ \"\"\" prepare train", "= int(stop) num_frames = stop - start sig, _ = torchaudio.load( wav, num_frames=num_frames,", "# ============================================================================ \"\"\" prepare train data \"\"\" import os import sys import random", ": wavs.shape[1]] else: zero_sig = torch.zeros_like(wavs) zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug wavs_aug", "and normalization feats = hparams[\"compute_features\"](wavs) feats = hparams[\"mean_var_norm\"](feats, lens) ct = datetime.now() ts", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "= batch.spk_id_encoded.data spkid = torch.cat([batch.spk_id_encoded.data] * n_augment, dim=0) np.save(os.path.join(save_dir, id_save_name), spkid.numpy()) np.save(os.path.join(save_dir, fea_save_name),", "proper encodings for phones train_data, valid_data, _ = dataio_prep(hparams) print(\"len of train:\", len(train_data))", "speechbrain.utils.distributed import run_on_main from speechbrain.dataio.sampler import ReproducibleRandomSampler from src.voxceleb_prepare import prepare_voxceleb def dataio_prep(params):", "label_encoder.encode_sequence_torch([spk_id]) yield spk_id_encoded sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline) # 3. 
Fit encoder: # Load or compute", "torch import torchaudio from hyperpyyaml import load_hyperpyyaml import speechbrain as sb from speechbrain.utils.data_utils", "valid_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=params[\"valid_annotation\"], replacements={\"data_root\": data_folder}, ) datasets = [train_data_reader, valid_data_reader] label_encoder =", "augment in enumerate(hparams[\"augment_pipeline\"]): # Apply augment wavs_aug = augment(wavs, lens) # Managing speed", "== \"__main__\": hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) print('parse parameters done') print(\"start load hyper", "Set output: sb.dataio.dataset.set_output_keys(datasets, [\"id\", \"sig\", \"spk_id_encoded\"]) return train_data_reader, valid_data_reader, label_encoder if __name__ ==", "# 3. Define text pipeline: @sb.utils.data_pipeline.takes(\"spk_id\") @sb.utils.data_pipeline.provides(\"spk_id\", \"spk_id_encoded\") def label_pipeline(spk_id): yield spk_id spk_id_encoded", "= params[\"data_folder\"] # 1. Declarations: train_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=params[\"train_annotation\"], replacements={\"data_root\": data_folder}, ) valid_data_reader", "dataloader = sb.dataio.dataloader.make_dataloader( train_data, **loader_kwargs ) fea_fp = open(os.path.join(save_dir, \"fea.lst\"), 'w') label_fp =" ]
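# ----------------------------------------------------------------------------
# Usage sketch (hypothetical file names; SpeechBrain scripts are launched with
# a hyperparameter YAML plus optional --key=value overrides, which is what
# sb.parse_arguments() above consumes):
#
#   python prepare_train_data.py hparams/prepare_train_data.yaml \
#       --data_folder=/path/to/VoxCeleb --feat_folder=/path/to/feats
#
# Each pass over the dataloader dumps one <timestamp>_fea.npy / <timestamp>_id.npy
# pair per batch into feat_folder and records the file names in fea.lst / label.lst.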
import warnings
from typing import Union, Dict

import numpy as np
from geopandas import GeoDataFrame as GDF
from pandas import DataFrame as DF
import shapely
import shapely.affinity  # needed for shapely.affinity.scale in to_pixelcoords
from shapely.geometry import Polygon
import rasterio.crs
import rasterio.coords  # needed for the rasterio.coords.BoundingBox annotation
import geopandas as gpd
from tqdm import tqdm

import utils.img


def buffer_zero(ingeo: Union[GDF, Polygon]) -> Union[GDF, Polygon]:
    """Make invalid polygons (due to self-intersection) valid by buffering with 0."""
    if isinstance(ingeo, Polygon):
        if ingeo.is_valid is False:
            return ingeo.buffer(0)
        else:
            return ingeo
    elif isinstance(ingeo, GDF):
        if False in ingeo.geometry.is_valid.unique():
            ingeo.geometry = ingeo.geometry.apply(lambda _p: _p.buffer(0))
            return ingeo
        else:
            return ingeo


def close_holes(ingeo: Union[GDF, Polygon]) -> Union[GDF, Polygon]:
    """Close polygon holes by limitation to the exterior ring."""
    def _close_holes(poly: Polygon):
        if poly.interiors:
            return Polygon(list(poly.exterior.coords))
        else:
            return poly

    if isinstance(ingeo, Polygon):
        return _close_holes(ingeo)
    elif isinstance(ingeo, GDF):
        ingeo.geometry = ingeo.geometry.apply(lambda _p: _close_holes(_p))
        return ingeo


def set_crs(df: GDF, epsg_code: Union[int, str]) -> GDF:
    """Sets dataframe crs in geopandas pipeline.

    TODO: Deprecate with next rasterio version that will integrate set_crs method.
    """
    df.crs = {'init': f'epsg:{str(epsg_code)}'}
    return df


def explode_mp(df: GDF) -> GDF:
    """Explode all multi-polygon geometries in a geodataframe into individual polygon geometries.

    Adds exploded polygons as rows at the end of the geodataframe and resets its index.
    """
    outdf = df[df.geom_type == 'Polygon']

    df_mp = df[df.geom_type == 'MultiPolygon']
    for idx, row in df_mp.iterrows():
        df_temp = gpd.GeoDataFrame(columns=df_mp.columns)
        # One row per member polygon, each keeping the original attributes.
        df_temp = df_temp.append([row] * len(row.geometry), ignore_index=True)
        for i in range(len(row.geometry)):
            df_temp.loc[i, 'geometry'] = row.geometry[i]
        outdf = outdf.append(df_temp, ignore_index=True)

    outdf.reset_index(drop=True, inplace=True)
    return outdf


def keep_biggest_poly(df: GDF) -> GDF:
    """Replaces MultiPolygons with the biggest polygon contained in the MultiPolygon."""
    row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist()
    for idx in row_idxs_mp:
        mp = df.loc[idx].geometry
        poly_areas = [p.area for p in mp]
        max_area_poly = mp[poly_areas.index(max(poly_areas))]
        df.loc[idx, 'geometry'] = max_area_poly
    return df


def clip(df: GDF,
         clip_poly: Polygon,
         explode_mp_: bool = False,
         keep_biggest_poly_: bool = False,
         ) -> GDF:
    """Filter and clip geodataframe to clipping geometry.

    The clipping geometry needs to be in the same projection as the geodataframe.

    Args:
        df: input geodataframe.
        clip_poly: Clipping polygon geometry, needs to be in the same crs as the input geodataframe.
        explode_mp_: Applies explode_mp function. Appends dataframe rows for each polygon in potential
            multipolygons that were created by the intersection. Resets the dataframe index!
        keep_biggest_poly_: Applies keep_biggest_poly function. Replaces MultiPolygons with the biggest
            polygon contained in the MultiPolygon.

    Returns:
        Result geodataframe.
    """
    df = df[df.geometry.intersects(clip_poly)].copy()
    df.geometry = df.geometry.apply(lambda _p: _p.intersection(clip_poly))
    # df = gpd.overlay(df, clip_poly, how='intersection')  # Slower.

    row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist()

    if not row_idxs_mp:
        return df
    elif not explode_mp_ and (not keep_biggest_poly_):
        warnings.warn(f"Warning, intersection resulted in {len(row_idxs_mp)} split multipolygons. Use "
                      f"explode_mp_=True or keep_biggest_poly_=True.")
        return df
    elif explode_mp_ and keep_biggest_poly_:
        raise ValueError('You can only use one of "explode_mp_" or "keep_biggest_poly_"!')
    elif explode_mp_:
        return explode_mp(df)
    elif keep_biggest_poly_:
        return keep_biggest_poly(df)


def reclassify_col(df: Union[GDF, DF],
                   rcl_scheme: Dict,
                   col_classlabels: str = 'lcsub',
                   col_classids: str = 'lcsub_id',
                   drop_other_classes: bool = True
                   ) -> Union[GDF, DF]:
    """Reclassify class label and class ids in a dataframe column.

    TODO: Simplify & make more efficient!

    Args:
        df: input geodataframe.
        rcl_scheme: Reclassification scheme, e.g. {'springcereal': [1,2,3], 'wintercereal': [10,11]}
        col_classlabels: column with class labels.
        col_classids: column with class ids.
        drop_other_classes: Drop classes that are not contained in the reclassification scheme.

    Returns:
        Result dataframe.
    """
    if drop_other_classes is True:
        classes_to_drop = [v for values in rcl_scheme.values() for v in values]
        df = df[df[col_classids].isin(classes_to_drop)].copy()

    rcl_dict = {}
    rcl_dict_id = {}
    for i, (key, value) in enumerate(rcl_scheme.items(), 1):
        for v in value:
            rcl_dict[v] = key
            rcl_dict_id[v] = i

    df[f'r_{col_classlabels}'] = df[col_classids].copy().map(rcl_dict)  # map name first, id second!
    df[f'r_{col_classids}'] = df[col_classids].map(rcl_dict_id)

    return df


reclass_legend = {
    'springcereal': [1, 2, 3, 4, 6, 7, 21, 55, 56, 210, 211, 212, 213, 214, 215, 224, 230, 234, 701,
                     702, 703, 704, 705],
    'wintercereal': [10, 11, 13, 14, 15, 16, 17, 22, 57, 220, 221, 222, 223, 235],
    'maize': [5, 216],
    'grassland': [101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
                  118, 120, 121, 122, 123, 125, 126, 162, 170, 171, 172, 173, 174, 180, 182, 260, 261,
                  262, 263, 264, 266, 267, 268, 269, 270, 281, 282, 283, 284],
    'other': [23, 24, 25, 30, 31, 32, 35, 36, 40, 42, 51, 52, 53, 54, 55, 56, 57, 124, 160, 161, 280,
              401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 415, 416, 417, 418, 420,
              421, 422, 423, 424, 429, 430, 431, 432, 434, 440, 448, 449, 450, 487, 488, 489, 491, 493,
              496, 497, 498, 499, 501, 502, 503, 504, 505, 507, 509, 512, 513, 514, 515, 516, 517, 518,
              519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 536, 539,
              540, 541, 542, 543, 544, 545, 547, 548, 549, 550, 551, 552, 553, 560, 561, 563, 570, 579]
    # drops other non-crop related classes (forest related, environment, recreation, other grass,
    # permanent grass, wasteland, ..)
    }
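# ----------------------------------------------------------------------------
# Example of reclassify_col with the legend above (hypothetical input table;
# 'lcsub'/'lcsub_id' are the default label/id column names):
#
#   df = DF({'lcsub': ['barley', 'wheat'], 'lcsub_id': [1, 10]})
#   df = reclassify_col(df, rcl_scheme=reclass_legend)
#   df['r_lcsub']     # -> ['springcereal', 'wintercereal']
#   df['r_lcsub_id']  # -> [1, 2]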
Replaces MultiPolygons with the biggest polygon contained in", "517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529,", "len(row.geometry), ignore_index=True) for i in range(len(row.geometry)): df_temp.loc[i, 'geometry'] = row.geometry[i] outdf = outdf.append(df_temp,", "if poly.interiors: return Polygon(list(poly.exterior.coords)) else: return poly if isinstance(ingeo, Polygon): return _close_holes(ingeo) elif", "# # Clip geometry to chip chip_df = vector_df.pipe(utils.geo.clip, clip_poly=chip_poly, keep_biggest_poly_=True) if not", "parameters. nrows: image array nrows, required for scale. ncols: image array ncols, required", "124, 160, 161, 280, 401, 402, 403, 404, 405, 406, 407, 408, 409,", "raise Exception( f'reference_bounds argument is of type {type(reference_bounds)}, needs to be a tuple", "type as input. \"\"\" def _to_pixelcoords(poly: Polygon, reference_bounds, scale, nrows, ncols): try: minx,", "raster_height=raster_height, raster_transform=raster_transform, chip_width=chip_width, chip_height=chip_height, skip_partial_chips=True) all_chip_dfs = {} for i, (chip_window, chip_transform, chip_poly)", "284], 'other': [23, 24, 25, 30, 31, 32, 35, 36, 40, 42, 51,", "in potential multipolygons that were created by the intersection. Resets the dataframe index!", "112, 113, 114, 115, 116, 117, 118, 120, 121, 122, 123, 125, 126,", "\"\"\" outdf = df[df.geom_type == 'Polygon'] df_mp = df[df.geom_type == 'MultiPolygon'] for idx,", "outdf.append(df_temp, ignore_index=True) outdf.reset_index(drop=True, inplace=True) return outdf def keep_biggest_poly(df: GDF) -> GDF: \"\"\"Replaces MultiPolygons", "max_area_poly = mp[poly_areas.index(max(poly_areas))] df.loc[idx, 'geometry'] = max_area_poly return df def clip(df: GDF, clip_poly:", "Polygon]: \"\"\"Make invalid polygons (due to self-intersection) valid by buffering with 0.\"\"\" if", "reference_height=reference_height) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _invert_y_axis(poly=_p, reference_height=reference_height)) return ingeo def", "as gpd from tqdm import tqdm import utils.img def buffer_zero(ingeo: Union[GDF, Polygon]) ->", "reclassification scheme. Returns: Result dataframe. \"\"\" if drop_other_classes is True: classes_to_drop = [v", "\"\"\"Reduces the number of after comma decimals of a shapely Polygon or geodataframe", "-> Union[Polygon, GDF]: \"\"\"Invert y-axis of polygon or geodataframe geometries in reference to", "Polygon, precision: int) -> Polygon: geojson = shapely.geometry.mapping(poly) geojson['coordinates'] = np.round(np.array(geojson['coordinates']), precision) poly", "the same projection as the geodataframe. Args: df: input geodataframe clip_poly: Clipping polygon", "df[df[col_classids].isin(classes_to_drop)].copy() rcl_dict = {} rcl_dict_id = {} for i, (key, value) in enumerate(rcl_scheme.items(),", "tuple], scale: bool=False, nrows: int=None, ncols: int=None ) -> Union[Polygon, GDF]: \"\"\"Converts projected", "the MultiPolygon.\"\"\" row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist() for idx in row_idxs_mp: mp =", "423, 424, 429, 430, 431, 432, 434, 440, 448, 449, 450, 487, 488,", "\"\"\" if drop_other_classes is True: classes_to_drop = [v for values in rcl_scheme.values() for", "552, 553, 560, 561, 563, 570, 579] # drop other non-crop related classes", "and nrows required for scale') x_scaler = ncols / w_poly y_scaler = nrows", "geodataframe to chip geometries. Filters small polygons and skips empty chips. 
Args: vector_df:", "269, 270, 281, 282, 283, 284], 'other': [23, 24, 25, 30, 31, 32,", "& make more efficient! Args: df: input geodataframe. rcl_scheme: Reclassification scheme, e.g. {'springcereal':", "{} rcl_dict_id = {} for i, (key, value) in enumerate(rcl_scheme.items(), 1): for v", "clip_poly=chip_poly, keep_biggest_poly_=True) if not all(chip_df.geometry.is_empty): chip_df.geometry = chip_df.simplify(1, preserve_topology=True) else: continue # Drop", "point of origin of image bbox. x_coords, y_coords = poly.exterior.coords.xy p_origin = shapely.geometry.Polygon([[x", "class ids. drop_other_classes: Drop classes that are not contained in the reclassification scheme.", "561, 563, 570, 579] # drop other non-crop related classes (forest related, environment,", "chip_width=chip_width, chip_height=chip_height, skip_partial_chips=True) all_chip_dfs = {} for i, (chip_window, chip_transform, chip_poly) in enumerate(tqdm(generator_window_bounds)):", "# Clip geometry to chip chip_df = vector_df.pipe(utils.geo.clip, clip_poly=chip_poly, keep_biggest_poly_=True) if not all(chip_df.geometry.is_empty):", ") -> Union[Polygon, GDF]: \"\"\"Converts projected polygon coordinates to pixel coordinates of an", "return df elif not explode_mp_ and (not keep_biggest_poly_): warnings.warn(f\"Warning, intersection resulted in {len(row_idxs_mp)}", ") -> Union[GDF, DF]: \"\"\"Reclassify class label and class ids in a dataframe", "final chip_df, chip_window, chip_transform, chip_poly objects. \"\"\" generator_window_bounds = utils.img.get_chip_windows(raster_width=raster_width, raster_height=raster_height, raster_transform=raster_transform, chip_width=chip_width,", "the first n image chips, used for debugging. Returns: Dictionary containing the final", "Height (in coordinates or rows) of reference object (polygon or image, e.g. image", "= shapely.geometry.Polygon([[x - minx, y - miny] for x, y in zip(x_coords, y_coords)])", "elif scale is True: if ncols is None or nrows is None: raise", "x, y in zip(x_coords, y_coords)]) if scale is False: return p_origin elif scale", "403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 415, 416,", "162, 170, 171, 172, 173, 174, 180, 182, 260, 261, 262, 263, 264,", "# Slower. row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist() if not row_idxs_mp: return df elif", "'grassland': [101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,", "return ingeo elif isinstance(ingeo, GDF): if False in ingeo.geometry.is_valid.unique(): ingeo.geometry = ingeo.geometry.apply(lambda _p:", "or tuple of reference (e.g. image chip) in format (left, bottom, right, top)", "polygon in potential multipolygons that were created by the intersection. 
Resets the dataframe", "if isinstance(ingeo, Polygon): return _reduce_precision(poly=ingeo, precision=precision) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p:", "== 'Polygon'] df_mp = df[df.geom_type == 'MultiPolygon'] for idx, row in df_mp.iterrows(): df_temp", "551, 552, 553, 560, 561, 563, 570, 579] # drop other non-crop related", ">= first_n_chips: break # # Clip geometry to chip chip_df = vector_df.pipe(utils.geo.clip, clip_poly=chip_poly,", "_reduce_precision(poly: Polygon, precision: int) -> Polygon: geojson = shapely.geometry.mapping(poly) geojson['coordinates'] = np.round(np.array(geojson['coordinates']), precision)", "poly.interiors: return Polygon(list(poly.exterior.coords)) else: return poly if isinstance(ingeo, Polygon): return _close_holes(ingeo) elif isinstance(ingeo,", "invalid polygons (due to self-intersection) valid by buffering with 0.\"\"\" if isinstance(ingeo, Polygon):", "df_mp = df[df.geom_type == 'MultiPolygon'] for idx, row in df_mp.iterrows(): df_temp = gpd.GeoDataFrame(columns=df_mp.columns)", "= ingeo.geometry.apply(lambda _p: _invert_y_axis(poly=_p, reference_height=reference_height)) return ingeo def cut_chip_geometries(vector_df, raster_width, raster_height, raster_transform, chip_width=128,", "places for latitude and longitude which equates to roughly 10cm of precision (https://github.com/perrygeo/geojson-precision).", "== 'MultiPolygon'] for idx, row in df_mp.iterrows(): df_temp = gpd.GeoDataFrame(columns=df_mp.columns) df_temp = df_temp.append([row]", "bounding box e.g. of an image chip. Usage e.g. for COCOJson format. Args:", "TODO: Simplify & make more efficient! Args: df: input geodataframe. rcl_scheme: Reclassification scheme,", "534, 536, 539, 540, 541, 542, 543, 544, 545, 547, 548, 549, 550,", "chip_window, chip_transform, chip_poly objects. \"\"\" generator_window_bounds = utils.img.get_chip_windows(raster_width=raster_width, raster_height=raster_height, raster_transform=raster_transform, chip_width=chip_width, chip_height=chip_height, skip_partial_chips=True)", "# Drop small geometries chip_df = chip_df[chip_df.geometry.area * (10 * 10) > 5000]", "False: return ingeo.buffer(0) else: return ingeo elif isinstance(ingeo, GDF): if False in ingeo.geometry.is_valid.unique():", "52, 53, 54, 55, 56, 57, 124, 160, 161, 280, 401, 402, 403,", "not contained in the reclassification scheme. Returns: Result dataframe. \"\"\" if drop_other_classes is", "outdf.reset_index(drop=True, inplace=True) return outdf def keep_biggest_poly(df: GDF) -> GDF: \"\"\"Replaces MultiPolygons with the", "= { 'springcereal': [1, 2, 3, 4, 6, 7, 21, 55, 56, 210,", "Polygon): return _to_pixelcoords(poly=ingeo, reference_bounds=reference_bounds, scale=scale, nrows=nrows, ncols=ncols) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda", "ingeo def cut_chip_geometries(vector_df, raster_width, raster_height, raster_transform, chip_width=128, chip_height=128, first_n_chips=None): \"\"\"Workflow to cut a", "110, 111, 112, 113, 114, 115, 116, 117, 118, 120, 121, 122, 123,", "Applies explode_mp function. 
Append dataframe rows for each polygon in potential multipolygons that", "rows for each polygon in potential multipolygons that were created by the intersection.", "y_coords)]) return p_inverted_y_axis if isinstance(ingeo, Polygon): return _invert_y_axis(poly=ingeo, reference_height=reference_height) elif isinstance(ingeo, GDF): ingeo.geometry", "inplace=True) return outdf def keep_biggest_poly(df: GDF) -> GDF: \"\"\"Replaces MultiPolygons with the biggest", "3, 4, 6, 7, 21, 55, 56, 210, 211, 212, 213, 214, 215,", "be cut to chip geometries. raster_width: rasterio meta['width'] raster_height: rasterio meta['height'] raster_transform: rasterio", "to cut a vector geodataframe to chip geometries. Filters small polygons and skips", "563, 570, 579] # drop other non-crop related classes (forest related, environment, recreation,", "meta['width'] raster_height: rasterio meta['height'] raster_transform: rasterio meta['transform'] chip_width: Desired pixel width. chip_height: Desired", "420, 421, 422, 423, 424, 429, 430, 431, 432, 434, 440, 448, 449,", "'maize': [5, 216], 'grassland': [101, 102, 103, 104, 105, 106, 107, 108, 109,", "df_temp.loc[i, 'geometry'] = row.geometry[i] outdf = outdf.append(df_temp, ignore_index=True) outdf.reset_index(drop=True, inplace=True) return outdf def", "for i, (chip_window, chip_transform, chip_poly) in enumerate(tqdm(generator_window_bounds)): if i >= first_n_chips: break #", "def explode_mp(df: GDF) -> GDF: \"\"\"Explode all multi-polygon geometries in a geodataframe into", "Replaces MultiPolygons with the biggest polygon contained in the MultiPolygon. Returns: Result geodataframe.", "geodataframe geometries. GeoJSON specification recommends 6 decimal places for latitude and longitude which", "image chip. Usage e.g. for COCOJson format. Args: ingeo: Input Polygon or geodataframe.", "the polygons to the image size/resolution. Requires image array nrows and ncols parameters.", "= (maxx - minx, maxy - miny) except (TypeError, ValueError): raise Exception( f'reference_bounds", "(forest related, environment, recreation, other grass, permanent grass, # wasteland, ..) } def", "same type as input. \"\"\" def _reduce_precision(poly: Polygon, precision: int) -> Polygon: geojson", "ncols=chip_width, nrows=chip_height) chip_df = chip_df.pipe(invert_y_axis, reference_height=chip_height) else: continue chip_name = f'COCO_train2016_000000{100000+i}' # _{clip_minX}_{clip_minY}_{clip_maxX}_{clip_maxY}'", "515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,", "nrows: image array nrows, required for scale. ncols: image array ncols, required for", "'lcsub', col_classids: str= 'lcsub_id', drop_other_classes: bool=True ) -> Union[GDF, DF]: \"\"\"Reclassify class label", "_close_holes(ingeo) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _close_holes(_p)) return ingeo def set_crs(df:", "after comma values that should remain. Returns: Result polygon or geodataframe, same type", "(polygon or image, e.g. image chip. Returns: Result polygon or geodataframe, same type", "an image array. Subtracts point of origin, scales to pixelcoordinates. Input: ingeo: input", "scale: bool=False, nrows: int=None, ncols: int=None ) -> Union[Polygon, GDF]: \"\"\"Converts projected polygon", "= max_area_poly return df def clip(df: GDF, clip_poly: Polygon, explode_mp_: bool = False,", "is False: return p_origin elif scale is True: if ncols is None or", "in the MultiPolygon. Returns: Result geodataframe. 
\"\"\" df = df[df.geometry.intersects(clip_poly)].copy() df.geometry = df.geometry.apply(lambda", "or geodataframe geometries. GeoJSON specification recommends 6 decimal places for latitude and longitude", "rasterio version that will integrate set_crs method. \"\"\" df.crs = {'init': f'epsg:{str(epsg_code)}'} return", "projected polygon coordinates to pixel coordinates of an image array. Subtracts point of", "contained in the MultiPolygon. Returns: Result geodataframe. \"\"\" df = df[df.geometry.intersects(clip_poly)].copy() df.geometry =", "= False, ) -> GDF: \"\"\"Filter and clip geodataframe to clipping geometry. The", "class label and class ids in a dataframe column. # TODO: Simplify &", "Polygon import rasterio.crs import geopandas as gpd from tqdm import tqdm import utils.img", "Bounding box object or tuple of reference (e.g. image chip) in format (left,", "None or nrows is None: raise ValueError('ncols and nrows required for scale') x_scaler", "Union[GDF, DF], rcl_scheme: Dict, col_classlabels: str= 'lcsub', col_classids: str= 'lcsub_id', drop_other_classes: bool=True )", "polygons as rows at the end of the geodataframe and resets its index.", "int=None, ncols: int=None ) -> Union[Polygon, GDF]: \"\"\"Converts projected polygon coordinates to pixel", "df[df.geom_type == 'MultiPolygon'] for idx, row in df_mp.iterrows(): df_temp = gpd.GeoDataFrame(columns=df_mp.columns) df_temp =", "\"\"\"Reclassify class label and class ids in a dataframe column. # TODO: Simplify", "in the same crs as the input geodataframe. explode_mp_: Applies explode_mp function. Append", "chip_height: Desired pixel height. first_n_chips: Only processes the first n image chips, used", "-> GDF: \"\"\"Filter and clip geodataframe to clipping geometry. The clipping geometry needs", "return df reclass_legend = { 'springcereal': [1, 2, 3, 4, 6, 7, 21,", "values in rcl_scheme.values() for v in values] df = df[df[col_classids].isin(classes_to_drop)].copy() rcl_dict = {}", "Union[GDF, Polygon]: \"\"\"Close polygon holes by limitation to the exterior ring.\"\"\" def _close_holes(poly:", "were created by the intersection. Resets the dataframe index! keep_biggest_poly_: Applies keep_biggest_poly function.", "for COCOJson format. Args: ingeo: Input Polygon or geodataframe. reference_height: Height (in coordinates", "geodataframe. \"\"\" df = df[df.geometry.intersects(clip_poly)].copy() df.geometry = df.geometry.apply(lambda _p: _p.intersection(clip_poly)) # df =", "scale: Scale the polygons to the image size/resolution. Requires image array nrows and", "= False, keep_biggest_poly_: bool = False, ) -> GDF: \"\"\"Filter and clip geodataframe", "122, 123, 125, 126, 162, 170, 171, 172, 173, 174, 180, 182, 260,", "as np from geopandas import GeoDataFrame as GDF from pandas import DataFrame as", "a geodataframe into individual polygon geometries. Adds exploded polygons as rows at the", "split multipolygons. Use \" f\"explode_mp_=True or keep_biggest_poly_=True.\") return df elif explode_mp_ and keep_biggest_poly_:", "chip_df.geometry = chip_df.simplify(1, preserve_topology=True) else: continue # Drop small geometries chip_df = chip_df[chip_df.geometry.area", "transform, nrows, ncols via rasterio.transform.reference_bounds') # Subtract point of origin of image bbox.", "ncols, required for scale. Returns: Result polygon or geodataframe, same type as input.", "and (not keep_biggest_poly_): warnings.warn(f\"Warning, intersection resulted in {len(row_idxs_mp)} split multipolygons. 
Use \" f\"explode_mp_=True", "413, 415, 416, 417, 418, 420, 421, 422, 423, 424, 429, 430, 431,", "more efficient! Args: df: input geodataframe. rcl_scheme: Reclassification scheme, e.g. {'springcereal': [1,2,3], 'wintercereal':", "_invert_y_axis(poly: Polygon=ingeo, reference_height=reference_height): x_coords, y_coords = poly.exterior.coords.xy p_inverted_y_axis = shapely.geometry.Polygon([[x, reference_height - y]", "230, 234, 701, 702, 703, 704, 705], 'wintercereal': [10, 11, 13, 14, 15,", "return _close_holes(ingeo) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _close_holes(_p)) return ingeo def", "explode_mp_ and keep_biggest_poly_: raise ValueError('You can only use one of \"explode_mp_\" or \"keep_biggest_poly_\"!')", "to pixelcoordinates. Input: ingeo: input geodataframe or shapely Polygon. reference_bounds: Bounding box object", "Polygon, reference_bounds, scale, nrows, ncols): try: minx, miny, maxx, maxy = reference_bounds w_poly,", "the dataframe index! keep_biggest_poly_: Applies keep_biggest_poly function. Replaces MultiPolygons with the biggest polygon", "# df = gpd.overlay(df, clip_poly, how='intersection') # Slower. row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist()", "_p: _close_holes(_p)) return ingeo def set_crs(df: GDF, epsg_code: Union[int, str]) -> GDF: \"\"\"Sets", "of the geodataframe and resets its index. \"\"\" outdf = df[df.geom_type == 'Polygon']", "_invert_y_axis(poly=_p, reference_height=reference_height)) return ingeo def cut_chip_geometries(vector_df, raster_width, raster_height, raster_transform, chip_width=128, chip_height=128, first_n_chips=None): \"\"\"Workflow", "reclassify_col(df: Union[GDF, DF], rcl_scheme: Dict, col_classlabels: str= 'lcsub', col_classids: str= 'lcsub_id', drop_other_classes: bool=True", "is of type {type(reference_bounds)}, needs to be a tuple or rasterio bounding box", "449, 450, 487, 488, 489, 491, 493, 496, 497, 498, 499, 501, 502,", "df.index[df.geometry.geom_type == 'MultiPolygon'].tolist() if not row_idxs_mp: return df elif not explode_mp_ and (not", "/ w_poly y_scaler = nrows / h_poly return shapely.affinity.scale(p_origin, xfact=x_scaler, yfact=y_scaler, origin=(0, 0,", "in a geodataframe into individual polygon geometries. Adds exploded polygons as rows at", "MultiPolygon. Returns: Result geodataframe. \"\"\" df = df[df.geometry.intersects(clip_poly)].copy() df.geometry = df.geometry.apply(lambda _p: _p.intersection(clip_poly))", "Polygon): return _invert_y_axis(poly=ingeo, reference_height=reference_height) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _invert_y_axis(poly=_p, reference_height=reference_height))", "input geodataframe or shapely Polygon. reference_bounds: Bounding box object or tuple of reference", "and resets its index. \"\"\" outdf = df[df.geom_type == 'Polygon'] df_mp = df[df.geom_type", "429, 430, 431, 432, 434, 440, 448, 449, 450, 487, 488, 489, 491,", "{type(reference_bounds)}, needs to be a tuple or rasterio bounding box ' f'instance. Can", "f'instance. 

def explode_mp(df: GDF) -> GDF:
    """Explode all multi-polygon geometries in a geodataframe into individual polygon geometries.

    Adds exploded polygons as rows at the end of the geodataframe and resets its index.
    """
    outdf = df[df.geom_type == 'Polygon']

    df_mp = df[df.geom_type == 'MultiPolygon']
    for idx, row in df_mp.iterrows():
        df_temp = gpd.GeoDataFrame(columns=df_mp.columns)
        df_temp = df_temp.append([row] * len(row.geometry), ignore_index=True)
        for i in range(len(row.geometry)):
            df_temp.loc[i, 'geometry'] = row.geometry[i]
        outdf = outdf.append(df_temp, ignore_index=True)

    outdf.reset_index(drop=True, inplace=True)
    return outdf


def keep_biggest_poly(df: GDF) -> GDF:
    """Replaces MultiPolygons with the biggest polygon contained in the MultiPolygon."""
    row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist()
    for idx in row_idxs_mp:
        mp = df.loc[idx].geometry
        poly_areas = [p.area for p in mp]
        max_area_poly = mp[poly_areas.index(max(poly_areas))]
        df.loc[idx, 'geometry'] = max_area_poly
    return df
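
# A short sketch (not in the original file; relies on the same shapely 1.x-era iteration
# and indexing APIs this module already uses) contrasting explode_mp and keep_biggest_poly
# on a one-row geodataframe holding a MultiPolygon of two squares.
def _example_explode_vs_keep():
    from shapely.geometry import MultiPolygon, box
    mp = MultiPolygon([box(0, 0, 1, 1), box(2, 0, 5, 3)])
    df = gpd.GeoDataFrame({'name': ['field'], 'geometry': [mp]})
    exploded = explode_mp(df)        # two rows, one Polygon each
    biggest = keep_biggest_poly(df)  # one row, keeps the bigger 3x3 square
    return exploded, biggest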

def clip(df: GDF,
         clip_poly: Polygon,
         explode_mp_: bool = False,
         keep_biggest_poly_: bool = False,
         ) -> GDF:
    """Filter and clip geodataframe to clipping geometry.

    The clipping geometry needs to be in the same projection as the geodataframe.

    Args:
        df: input geodataframe
        clip_poly: Clipping polygon geometry, needs to be in the same crs as the input geodataframe.
        explode_mp_: Applies the explode_mp function: appends dataframe rows for each polygon of
            potential multipolygons that were created by the intersection. Resets the dataframe index!
        keep_biggest_poly_: Applies the keep_biggest_poly function: replaces MultiPolygons with the
            biggest polygon contained in the MultiPolygon.

    Returns:
        Result geodataframe.
    """
    df = df[df.geometry.intersects(clip_poly)].copy()
    df.geometry = df.geometry.apply(lambda _p: _p.intersection(clip_poly))
    # df = gpd.overlay(df, clip_poly, how='intersection')  # Slower.

    row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist()

    if not row_idxs_mp:
        return df
    elif not explode_mp_ and (not keep_biggest_poly_):
        warnings.warn(f"Warning, intersection resulted in {len(row_idxs_mp)} split multipolygons. Use "
                      f"explode_mp_=True or keep_biggest_poly_=True.")
        return df
    elif explode_mp_ and keep_biggest_poly_:
        raise ValueError('You can only use one of "explode_mp_" or "keep_biggest_poly_"!')
    elif explode_mp_:
        return explode_mp(df)
    elif keep_biggest_poly_:
        return keep_biggest_poly(df)
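
# A minimal sketch (hypothetical data, not from the original module): clipping three
# parcels to a chip polygon. The parcel fully outside the chip is dropped, the others
# are cut to the chip; keep_biggest_poly_=True guards against split MultiPolygons.
def _example_clip():
    from shapely.geometry import box
    parcels = gpd.GeoDataFrame({'geometry': [box(-1, -1, 3, 3),
                                             box(8, 8, 12, 12),
                                             box(20, 20, 21, 21)]})
    chip = box(0, 0, 10, 10)
    return clip(parcels, clip_poly=chip, keep_biggest_poly_=True)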

def reclassify_col(df: Union[GDF, DF],
                   rcl_scheme: Dict,
                   col_classlabels: str = 'lcsub',
                   col_classids: str = 'lcsub_id',
                   drop_other_classes: bool = True
                   ) -> Union[GDF, DF]:
    """Reclassify class labels and class ids in a dataframe column.

    # TODO: Simplify & make more efficient!

    Args:
        df: input geodataframe.
        rcl_scheme: Reclassification scheme, e.g. {'springcereal': [1,2,3], 'wintercereal': [10,11]}
        col_classlabels: column with class labels.
        col_classids: column with class ids.
        drop_other_classes: Drop classes that are not contained in the reclassification scheme.

    Returns:
        Result dataframe.
    """
    if drop_other_classes is True:
        classes_to_drop = [v for values in rcl_scheme.values() for v in values]
        df = df[df[col_classids].isin(classes_to_drop)].copy()

    rcl_dict = {}
    rcl_dict_id = {}
    for i, (key, value) in enumerate(rcl_scheme.items(), 1):
        for v in value:
            rcl_dict[v] = key
            rcl_dict_id[v] = i

    df[f'r_{col_classlabels}'] = df[col_classids].copy().map(rcl_dict)  # map name first, id second!
    df[f'r_{col_classids}'] = df[col_classids].map(rcl_dict_id)

    return df


reclass_legend = {
    'springcereal': [1, 2, 3, 4, 6, 7, 21, 55, 56, 210, 211, 212, 213, 214, 215, 224, 230, 234,
                     701, 702, 703, 704, 705],
    'wintercereal': [10, 11, 13, 14, 15, 16, 17, 22, 57, 220, 221, 222, 223, 235],
    'maize': [5, 216],
    'grassland': [101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
                  117, 118, 120, 121, 122, 123, 125, 126, 162, 170, 171, 172, 173, 174, 180, 182,
                  260, 261, 262, 263, 264, 266, 267, 268, 269, 270, 281, 282, 283, 284],
    'other': [23, 24, 25, 30, 31, 32, 35, 36, 40, 42, 51, 52, 53, 54, 55, 56, 57, 124, 160, 161,
              280, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 415, 416, 417,
              418, 420, 421, 422, 423, 424, 429, 430, 431, 432, 434, 440, 448, 449, 450, 487, 488,
              489, 491, 493, 496, 497, 498, 499, 501, 502, 503, 504, 505, 507, 509, 512, 513, 514,
              515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531,
              532, 533, 534, 536, 539, 540, 541, 542, 543, 544, 545, 547, 548, 549, 550, 551, 552,
              553, 560, 561, 563, 570, 579]
    # drop other non-crop related classes (forest related, environment, recreation,
    # other grass, permanent grass, wasteland, ..)
}
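
# A tiny sketch (invented class values; column names are the function's defaults) applying
# reclassify_col with a two-class scheme: id 999 is dropped, r_lcsub / r_lcsub_id are added.
def _example_reclassify():
    import pandas as pd
    df = pd.DataFrame({'lcsub': ['barley', 'wheat', 'forest'],
                       'lcsub_id': [1, 10, 999]})
    scheme = {'springcereal': [1, 2, 3], 'wintercereal': [10, 11]}
    return reclassify_col(df, rcl_scheme=scheme)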
\"\"\" def _to_pixelcoords(poly: Polygon,", "y_coords = poly.exterior.coords.xy p_inverted_y_axis = shapely.geometry.Polygon([[x, reference_height - y] for x, y in", "704, 705], 'wintercereal': [10, 11, 13, 14, 15, 16, 17, 22, 57, 220,", "raster_transform, chip_width=128, chip_height=128, first_n_chips=None): \"\"\"Workflow to cut a vector geodataframe to chip geometries.", "in the reclassification scheme. Returns: Result dataframe. \"\"\" if drop_other_classes is True: classes_to_drop", "x_coords, y_coords = poly.exterior.coords.xy p_origin = shapely.geometry.Polygon([[x - minx, y - miny] for", "266, 267, 268, 269, 270, 281, 282, 283, 284], 'other': [23, 24, 25,", "coordinates of an image array. Subtracts point of origin, scales to pixelcoordinates. Input:", "self-intersection) valid by buffering with 0.\"\"\" if isinstance(ingeo, Polygon): if ingeo.is_valid is False:", "the biggest polygon contained in the MultiPolygon.\"\"\" row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist() for", "Drop small geometries chip_df = chip_df[chip_df.geometry.area * (10 * 10) > 5000] #5000", "the geometries to be cut to chip geometries. raster_width: rasterio meta['width'] raster_height: rasterio", "Append dataframe rows for each polygon in potential multipolygons that were created by", "Union[GDF, Polygon]: \"\"\"Make invalid polygons (due to self-intersection) valid by buffering with 0.\"\"\"", "= mp[poly_areas.index(max(poly_areas))] df.loc[idx, 'geometry'] = max_area_poly return df def clip(df: GDF, clip_poly: Polygon,", "geometries chip_df = chip_df[chip_df.geometry.area * (10 * 10) > 5000] #5000 sqm in", "rasterio meta['transform'] chip_width: Desired pixel width. chip_height: Desired pixel height. first_n_chips: Only processes", "elif isinstance(ingeo, GDF): if False in ingeo.geometry.is_valid.unique(): ingeo.geometry = ingeo.geometry.apply(lambda _p: _p.buffer(0)) return", "of a shapely Polygon or geodataframe geometries. GeoJSON specification recommends 6 decimal places", "first_n_chips: break # # Clip geometry to chip chip_df = vector_df.pipe(utils.geo.clip, clip_poly=chip_poly, keep_biggest_poly_=True)", "df: input geodataframe. rcl_scheme: Reclassification scheme, e.g. {'springcereal': [1,2,3], 'wintercereal': [10,11]} col_classlabels: column", "== 'MultiPolygon'].tolist() if not row_idxs_mp: return df elif not explode_mp_ and (not keep_biggest_poly_):", "489, 491, 493, 496, 497, 498, 499, 501, 502, 503, 504, 505, 507,", "GDF: \"\"\"Explode all multi-polygon geometries in a geodataframe into individual polygon geometries. Adds", "- miny] for x, y in zip(x_coords, y_coords)]) if scale is False: return", "geodataframe, same type as input. \"\"\" def _invert_y_axis(poly: Polygon=ingeo, reference_height=reference_height): x_coords, y_coords =", "GDF): if False in ingeo.geometry.is_valid.unique(): ingeo.geometry = ingeo.geometry.apply(lambda _p: _p.buffer(0)) return ingeo else:", "\"\"\"Filter and clip geodataframe to clipping geometry. The clipping geometry needs to be", "x, y in zip(x_coords, y_coords)]) return p_inverted_y_axis if isinstance(ingeo, Polygon): return _invert_y_axis(poly=ingeo, reference_height=reference_height)", "40, 42, 51, 52, 53, 54, 55, 56, 57, 124, 160, 161, 280,", "image bbox. x_coords, y_coords = poly.exterior.coords.xy p_origin = shapely.geometry.Polygon([[x - minx, y -", "518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530,", "504, 505, 507, 509, 512, 513, 514, 515, 516, 517, 518, 519, 520,", "cut to chip geometries. 
raster_width: rasterio meta['width'] raster_height: rasterio meta['height'] raster_transform: rasterio meta['transform']", "at the end of the geodataframe and resets its index. \"\"\" outdf =", "from typing import Union, Dict import numpy as np from geopandas import GeoDataFrame", "rasterio bounding box ' f'instance. Can be delineated from transform, nrows, ncols via", "clip(df: GDF, clip_poly: Polygon, explode_mp_: bool = False, keep_biggest_poly_: bool = False, )", "nrows: int=None, ncols: int=None ) -> Union[Polygon, GDF]: \"\"\"Converts projected polygon coordinates to", "return ingeo def to_pixelcoords(ingeo: Union[Polygon, GDF], reference_bounds: Union[rasterio.coords.BoundingBox, tuple], scale: bool=False, nrows: int=None,", "ingeo.geometry = ingeo.geometry.apply(lambda _p: _close_holes(_p)) return ingeo def set_crs(df: GDF, epsg_code: Union[int, str])", "scale=scale, nrows=nrows, ncols=ncols) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _to_pixelcoords(poly=_p, reference_bounds=reference_bounds, scale=scale,", "= [v for values in rcl_scheme.values() for v in values] df = df[df[col_classids].isin(classes_to_drop)].copy()", "df_temp = df_temp.append([row] * len(row.geometry), ignore_index=True) for i in range(len(row.geometry)): df_temp.loc[i, 'geometry'] =", "Desired pixel height. first_n_chips: Only processes the first n image chips, used for", "scale') x_scaler = ncols / w_poly y_scaler = nrows / h_poly return shapely.affinity.scale(p_origin,", "to chip geometries. raster_width: rasterio meta['width'] raster_height: rasterio meta['height'] raster_transform: rasterio meta['transform'] chip_width:", "line overlap effects. poly = poly.buffer(0) return poly if isinstance(ingeo, Polygon): return _reduce_precision(poly=ingeo,", "labels. col_classids: column with class ids. drop_other_classes: Drop classes that are not contained", "Desired pixel width. chip_height: Desired pixel height. first_n_chips: Only processes the first n", "'Polygon'] df_mp = df[df.geom_type == 'MultiPolygon'] for idx, row in df_mp.iterrows(): df_temp =", "needs to be in the same crs as the input geodataframe. explode_mp_: Applies", "\"\"\"Invert y-axis of polygon or geodataframe geometries in reference to a bounding box", "in df_mp.iterrows(): df_temp = gpd.GeoDataFrame(columns=df_mp.columns) df_temp = df_temp.append([row] * len(row.geometry), ignore_index=True) for i", "def clip(df: GDF, clip_poly: Polygon, explode_mp_: bool = False, keep_biggest_poly_: bool = False,", "longitude which equates to roughly 10cm of precision (https://github.com/perrygeo/geojson-precision). Args: ingeo: input geodataframe", "450, 487, 488, 489, 491, 493, 496, 497, 498, 499, 501, 502, 503,", "raise ValueError('ncols and nrows required for scale') x_scaler = ncols / w_poly y_scaler", "or geodataframe, same type as input. \"\"\" def _to_pixelcoords(poly: Polygon, reference_bounds, scale, nrows,", "int=None ) -> Union[Polygon, GDF]: \"\"\"Converts projected polygon coordinates to pixel coordinates of", "701, 702, 703, 704, 705], 'wintercereal': [10, 11, 13, 14, 15, 16, 17,", "56, 210, 211, 212, 213, 214, 215, 224, 230, 234, 701, 702, 703,", "in zip(x_coords, y_coords)]) if scale is False: return p_origin elif scale is True:", "coordinates or rows) of reference object (polygon or image, e.g. image chip. Returns:", "values that should remain. 
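
# A quick sketch (toy coordinates): rounding polygon coordinates to 3 decimal places.
def _example_reduce_precision():
    poly = Polygon([(0.123456, 0.654321), (1.111111, 0.0), (0.5, 1.999999)])
    return reduce_precision(poly, precision=3)  # coords become e.g. (0.123, 0.654)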

def to_pixelcoords(ingeo: Union[Polygon, GDF],
                   reference_bounds: Union[rasterio.coords.BoundingBox, tuple],
                   scale: bool = False,
                   nrows: int = None,
                   ncols: int = None
                   ) -> Union[Polygon, GDF]:
    """Converts projected polygon coordinates to pixel coordinates of an image array.

    Subtracts point of origin, scales to pixelcoordinates.

    Args:
        ingeo: input geodataframe or shapely Polygon.
        reference_bounds: Bounding box object or tuple of the reference (e.g. image chip) in
            format (left, bottom, right, top).
        scale: Scale the polygons to the image size/resolution. Requires the image array nrows
            and ncols parameters.
        nrows: image array nrows, required for scale.
        ncols: image array ncols, required for scale.

    Returns:
        Result polygon or geodataframe, same type as input.
    """
    def _to_pixelcoords(poly: Polygon, reference_bounds, scale, nrows, ncols):
        try:
            minx, miny, maxx, maxy = reference_bounds
            w_poly, h_poly = (maxx - minx, maxy - miny)
        except (TypeError, ValueError):
            raise Exception(
                f'reference_bounds argument is of type {type(reference_bounds)}, needs to be a tuple or '
                f'rasterio bounding box instance. Can be delineated from transform, nrows, ncols via '
                f'rasterio.transform.reference_bounds')

        # Subtract point of origin of image bbox.
        x_coords, y_coords = poly.exterior.coords.xy
        p_origin = shapely.geometry.Polygon([[x - minx, y - miny] for x, y in zip(x_coords, y_coords)])

        if scale is False:
            return p_origin
        elif scale is True:
            if ncols is None or nrows is None:
                raise ValueError('ncols and nrows required for scale')
            x_scaler = ncols / w_poly
            y_scaler = nrows / h_poly
            return shapely.affinity.scale(p_origin, xfact=x_scaler, yfact=y_scaler, origin=(0, 0, 0))

    if isinstance(ingeo, Polygon):
        return _to_pixelcoords(poly=ingeo, reference_bounds=reference_bounds, scale=scale, nrows=nrows,
                               ncols=ncols)
    elif isinstance(ingeo, GDF):
        ingeo.geometry = ingeo.geometry.apply(lambda _p: _to_pixelcoords(poly=_p,
                                                                         reference_bounds=reference_bounds,
                                                                         scale=scale, nrows=nrows,
                                                                         ncols=ncols))
        return ingeo
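
# A minimal sketch (invented UTM-like bounds): projecting a polygon into the pixel grid of a
# 128x128 chip with bounds (left, bottom, right, top) = (500000, 5600000, 501280, 5601280).
# Offsets of 100..500 m scale by 128/1280 = 0.1 to pixel coords (10, 10), (50, 10), (50, 50).
def _example_to_pixelcoords():
    poly = Polygon([(500100, 5600100), (500500, 5600100), (500500, 5600500)])
    return to_pixelcoords(poly, reference_bounds=(500000, 5600000, 501280, 5601280),
                          scale=True, nrows=128, ncols=128)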

def invert_y_axis(ingeo: Union[Polygon, GDF],
                  reference_height: int
                  ) -> Union[Polygon, GDF]:
    """Invert y-axis of polygon or geodataframe geometries in reference to a bounding box,
    e.g. of an image chip.

    Usage e.g. for COCOJson format.

    Args:
        ingeo: Input Polygon or geodataframe.
        reference_height: Height (in coordinates or rows) of the reference object
            (polygon or image, e.g. an image chip).

    Returns:
        Result polygon or geodataframe, same type as input.
    """
    def _invert_y_axis(poly: Polygon = ingeo, reference_height=reference_height):
        x_coords, y_coords = poly.exterior.coords.xy
        p_inverted_y_axis = shapely.geometry.Polygon([[x, reference_height - y]
                                                      for x, y in zip(x_coords, y_coords)])
        return p_inverted_y_axis

    if isinstance(ingeo, Polygon):
        return _invert_y_axis(poly=ingeo, reference_height=reference_height)
    elif isinstance(ingeo, GDF):
        ingeo.geometry = ingeo.geometry.apply(lambda _p: _invert_y_axis(poly=_p,
                                                                        reference_height=reference_height))
        return ingeo
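
# A short sketch flipping pixel coordinates for COCO: against a 128-pixel-high chip,
# the y-coordinates 10, 10, 50 become 118, 118, 78.
def _example_invert_y_axis():
    poly = Polygon([(10, 10), (50, 10), (50, 50)])
    return invert_y_axis(poly, reference_height=128)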
if not all(chip_df.geometry.is_empty): chip_df = chip_df.pipe(utils.geo.to_pixelcoords, reference_bounds=chip_poly.bounds, scale=True, ncols=chip_width, nrows=chip_height) chip_df", "that were created by the intersection. Resets the dataframe index! keep_biggest_poly_: Applies keep_biggest_poly", "raster_transform=raster_transform, chip_width=chip_width, chip_height=chip_height, skip_partial_chips=True) all_chip_dfs = {} for i, (chip_window, chip_transform, chip_poly) in", "GeoDataFrame as GDF from pandas import DataFrame as DF import shapely from shapely.geometry", "to be in the same crs as the input geodataframe. explode_mp_: Applies explode_mp", "= ingeo.geometry.apply(lambda _p: _close_holes(_p)) return ingeo def set_crs(df: GDF, epsg_code: Union[int, str]) ->", "COCOJson format. Args: ingeo: Input Polygon or geodataframe. reference_height: Height (in coordinates or", "array nrows and ncols parameters. nrows: image array nrows, required for scale. ncols:", "[10,11]} col_classlabels: column with class labels. col_classids: column with class ids. drop_other_classes: Drop", "Input Polygon or geodataframe. reference_height: Height (in coordinates or rows) of reference object", "Clipping polygon geometry, needs to be in the same crs as the input", "poly.is_valid: # Too low precision can potentially lead to invalid polygons due to", "- minx, y - miny] for x, y in zip(x_coords, y_coords)]) if scale", "zip(x_coords, y_coords)]) if scale is False: return p_origin elif scale is True: if", "rasterio.crs import geopandas as gpd from tqdm import tqdm import utils.img def buffer_zero(ingeo:", "clip_poly: Polygon, explode_mp_: bool = False, keep_biggest_poly_: bool = False, ) -> GDF:", "493, 496, 497, 498, 499, 501, 502, 503, 504, 505, 507, 509, 512,", "an image chip. Usage e.g. for COCOJson format. Args: ingeo: Input Polygon or", "or geodataframe, same type as input. \"\"\" def _invert_y_axis(poly: Polygon=ingeo, reference_height=reference_height): x_coords, y_coords", "rasterio.transform.reference_bounds') # Subtract point of origin of image bbox. x_coords, y_coords = poly.exterior.coords.xy", "of after comma decimals of a shapely Polygon or geodataframe geometries. GeoJSON specification", "is False: return ingeo.buffer(0) else: return ingeo elif isinstance(ingeo, GDF): if False in", "542, 543, 544, 545, 547, 548, 549, 550, 551, 552, 553, 560, 561,", "GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _to_pixelcoords(poly=_p, reference_bounds=reference_bounds, scale=scale, nrows=nrows, ncols=ncols)) return ingeo def", "keep_biggest_poly_: raise ValueError('You can only use one of \"explode_mp_\" or \"keep_biggest_poly_\"!') elif explode_mp_:", "107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 120,", "else: return ingeo elif isinstance(ingeo, GDF): if False in ingeo.geometry.is_valid.unique(): ingeo.geometry = ingeo.geometry.apply(lambda", "overlap effects. 
poly = poly.buffer(0) return poly if isinstance(ingeo, Polygon): return _reduce_precision(poly=ingeo, precision=precision)", "- minx, maxy - miny) except (TypeError, ValueError): raise Exception( f'reference_bounds argument is", "260, 261, 262, 263, 264, 266, 267, 268, 269, 270, 281, 282, 283,", "ignore_index=True) for i in range(len(row.geometry)): df_temp.loc[i, 'geometry'] = row.geometry[i] outdf = outdf.append(df_temp, ignore_index=True)", "chip_height=chip_height, skip_partial_chips=True) all_chip_dfs = {} for i, (chip_window, chip_transform, chip_poly) in enumerate(tqdm(generator_window_bounds)): if", "shapely.affinity.scale(p_origin, xfact=x_scaler, yfact=y_scaler, origin=(0, 0, 0)) if isinstance(ingeo, Polygon): return _to_pixelcoords(poly=ingeo, reference_bounds=reference_bounds, scale=scale,", "the intersection. Resets the dataframe index! keep_biggest_poly_: Applies keep_biggest_poly function. Replaces MultiPolygons with", "input geodataframe. explode_mp_: Applies explode_mp function. Append dataframe rows for each polygon in", "Resets the dataframe index! keep_biggest_poly_: Applies keep_biggest_poly function. Replaces MultiPolygons with the biggest", "in enumerate(rcl_scheme.items(), 1): for v in value: rcl_dict[v] = key rcl_dict_id[v] = i", "= df.index[df.geometry.geom_type == 'MultiPolygon'].tolist() if not row_idxs_mp: return df elif not explode_mp_ and", "clipping geometry. The clipping geometry needs to be in the same projection as", "as the input geodataframe. explode_mp_: Applies explode_mp function. Append dataframe rows for each", "explode_mp function. Append dataframe rows for each polygon in potential multipolygons that were", "df def explode_mp(df: GDF) -> GDF: \"\"\"Explode all multi-polygon geometries in a geodataframe", "isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _reduce_precision(poly=_p, precision=precision)) return ingeo def to_pixelcoords(ingeo: Union[Polygon,", "rcl_scheme: Reclassification scheme, e.g. {'springcereal': [1,2,3], 'wintercereal': [10,11]} col_classlabels: column with class labels.", "scale=True, ncols=chip_width, nrows=chip_height) chip_df = chip_df.pipe(invert_y_axis, reference_height=chip_height) else: continue chip_name = f'COCO_train2016_000000{100000+i}' #", "box ' f'instance. Can be delineated from transform, nrows, ncols via rasterio.transform.reference_bounds') #", "elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _close_holes(_p)) return ingeo def set_crs(df: GDF,", "579] # drop other non-crop related classes (forest related, environment, recreation, other grass,", "\"\"\" def _reduce_precision(poly: Polygon, precision: int) -> Polygon: geojson = shapely.geometry.mapping(poly) geojson['coordinates'] =", "of origin of image bbox. x_coords, y_coords = poly.exterior.coords.xy p_origin = shapely.geometry.Polygon([[x -", "import Union, Dict import numpy as np from geopandas import GeoDataFrame as GDF", "all(chip_df.geometry.is_empty): chip_df.geometry = chip_df.simplify(1, preserve_topology=True) else: continue # Drop small geometries chip_df =", "chip_transform, chip_poly) in enumerate(tqdm(generator_window_bounds)): if i >= first_n_chips: break # # Clip geometry", "type as input. 
\"\"\" def _invert_y_axis(poly: Polygon=ingeo, reference_height=reference_height): x_coords, y_coords = poly.exterior.coords.xy p_inverted_y_axis", "> 5000] #5000 sqm in UTM # Transform to chip pixelcoordinates and invert", "-> Union[GDF, DF]: \"\"\"Reclassify class label and class ids in a dataframe column.", "= chip_df[chip_df.geometry.area * (10 * 10) > 5000] #5000 sqm in UTM #", "126, 162, 170, 171, 172, 173, 174, 180, 182, 260, 261, 262, 263,", "= {} rcl_dict_id = {} for i, (key, value) in enumerate(rcl_scheme.items(), 1): for", "Union[GDF, Polygon]) -> Union[GDF, Polygon]: \"\"\"Make invalid polygons (due to self-intersection) valid by", "raster_transform: rasterio meta['transform'] chip_width: Desired pixel width. chip_height: Desired pixel height. first_n_chips: Only", "intersection resulted in {len(row_idxs_mp)} split multipolygons. Use \" f\"explode_mp_=True or keep_biggest_poly_=True.\") return df", "GDF) -> GDF: \"\"\"Replaces MultiPolygons with the biggest polygon contained in the MultiPolygon.\"\"\"", "next rasterio version that will integrate set_crs method. \"\"\" df.crs = {'init': f'epsg:{str(epsg_code)}'}", "of image bbox. x_coords, y_coords = poly.exterior.coords.xy p_origin = shapely.geometry.Polygon([[x - minx, y", "required for scale. ncols: image array ncols, required for scale. Returns: Result polygon", "17, 22, 57, 220, 221, 222, 223, 235], 'maize': [5, 216], 'grassland': [101,", "= vector_df.pipe(utils.geo.clip, clip_poly=chip_poly, keep_biggest_poly_=True) if not all(chip_df.geometry.is_empty): chip_df.geometry = chip_df.simplify(1, preserve_topology=True) else: continue", "in UTM # Transform to chip pixelcoordinates and invert y-axis for COCO format.", "keep_biggest_poly_: Applies keep_biggest_poly function. Replaces MultiPolygons with the biggest polygon contained in the", "42, 51, 52, 53, 54, 55, 56, 57, 124, 160, 161, 280, 401,", "541, 542, 543, 544, 545, 547, 548, 549, 550, 551, 552, 553, 560,", "the MultiPolygon. Returns: Result geodataframe. \"\"\" df = df[df.geometry.intersects(clip_poly)].copy() df.geometry = df.geometry.apply(lambda _p:", "if i >= first_n_chips: break # # Clip geometry to chip chip_df =", "i, (chip_window, chip_transform, chip_poly) in enumerate(tqdm(generator_window_bounds)): if i >= first_n_chips: break # #", "is None: raise ValueError('ncols and nrows required for scale') x_scaler = ncols /", "reference_bounds=reference_bounds, scale=scale, nrows=nrows, ncols=ncols)) return ingeo def invert_y_axis(ingeo: Union[Polygon, GDF], reference_height: int )", "df = df[df.geometry.intersects(clip_poly)].copy() df.geometry = df.geometry.apply(lambda _p: _p.intersection(clip_poly)) # df = gpd.overlay(df, clip_poly,", "from pandas import DataFrame as DF import shapely from shapely.geometry import Polygon import", "36, 40, 42, 51, 52, 53, 54, 55, 56, 57, 124, 160, 161,", "530, 531, 532, 533, 534, 536, 539, 540, 541, 542, 543, 544, 545,", "def keep_biggest_poly(df: GDF) -> GDF: \"\"\"Replaces MultiPolygons with the biggest polygon contained in", "scale=scale, nrows=nrows, ncols=ncols)) return ingeo def invert_y_axis(ingeo: Union[Polygon, GDF], reference_height: int ) ->", "right, top) scale: Scale the polygons to the image size/resolution. 
Requires image array", "chip_df = vector_df.pipe(utils.geo.clip, clip_poly=chip_poly, keep_biggest_poly_=True) if not all(chip_df.geometry.is_empty): chip_df.geometry = chip_df.simplify(1, preserve_topology=True) else:", "'lcsub_id', drop_other_classes: bool=True ) -> Union[GDF, DF]: \"\"\"Reclassify class label and class ids", "Deprecate with next rasterio version that will integrate set_crs method. \"\"\" df.crs =", "234, 701, 702, 703, 704, 705], 'wintercereal': [10, 11, 13, 14, 15, 16,", "p in mp] max_area_poly = mp[poly_areas.index(max(poly_areas))] df.loc[idx, 'geometry'] = max_area_poly return df def", "103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,", "548, 549, 550, 551, 552, 553, 560, 561, 563, 570, 579] # drop", "drop_other_classes is True: classes_to_drop = [v for values in rcl_scheme.values() for v in", "= df[col_classids].map(rcl_dict_id) return df reclass_legend = { 'springcereal': [1, 2, 3, 4, 6,", "Args: ingeo: input geodataframe or shapely Polygon. precision: number of after comma values", "the geodataframe. Args: df: input geodataframe clip_poly: Clipping polygon geometry, needs to be", "6, 7, 21, 55, 56, 210, 211, 212, 213, 214, 215, 224, 230,", "(left, bottom, right, top) scale: Scale the polygons to the image size/resolution. Requires", "class ids in a dataframe column. # TODO: Simplify & make more efficient!", "497, 498, 499, 501, 502, 503, 504, 505, 507, 509, 512, 513, 514,", "in row_idxs_mp: mp = df.loc[idx].geometry poly_areas = [p.area for p in mp] max_area_poly", "448, 449, 450, 487, 488, 489, 491, 493, 496, 497, 498, 499, 501,", "p_inverted_y_axis = shapely.geometry.Polygon([[x, reference_height - y] for x, y in zip(x_coords, y_coords)]) return", "\"\"\" def _invert_y_axis(poly: Polygon=ingeo, reference_height=reference_height): x_coords, y_coords = poly.exterior.coords.xy p_inverted_y_axis = shapely.geometry.Polygon([[x, reference_height", "contained in the MultiPolygon.\"\"\" row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist() for idx in row_idxs_mp:", "enumerate(rcl_scheme.items(), 1): for v in value: rcl_dict[v] = key rcl_dict_id[v] = i df[f'r_{col_classlabels}']", "507, 509, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522,", "format. Args: ingeo: Input Polygon or geodataframe. reference_height: Height (in coordinates or rows)", "= chip_df.simplify(1, preserve_topology=True) else: continue # Drop small geometries chip_df = chip_df[chip_df.geometry.area *", "vector_df: Geodataframe containing the geometries to be cut to chip geometries. raster_width: rasterio", "chip_df.pipe(invert_y_axis, reference_height=chip_height) else: continue chip_name = f'COCO_train2016_000000{100000+i}' # _{clip_minX}_{clip_minY}_{clip_maxX}_{clip_maxY}' all_chip_dfs[chip_name] = {'chip_df': chip_df,", "meta['height'] raster_transform: rasterio meta['transform'] chip_width: Desired pixel width. chip_height: Desired pixel height. first_n_chips:", "isinstance(ingeo, Polygon): return _close_holes(ingeo) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _close_holes(_p)) return", "column with class ids. drop_other_classes: Drop classes that are not contained in the", "its index. \"\"\" outdf = df[df.geom_type == 'Polygon'] df_mp = df[df.geom_type == 'MultiPolygon']", "yfact=y_scaler, origin=(0, 0, 0)) if isinstance(ingeo, Polygon): return _to_pixelcoords(poly=ingeo, reference_bounds=reference_bounds, scale=scale, nrows=nrows, ncols=ncols)", "polygon or geodataframe, same type as input. 
\"\"\" def _reduce_precision(poly: Polygon, precision: int)", "potentially lead to invalid polygons due to line overlap effects. poly = poly.buffer(0)", "(maxx - minx, maxy - miny) except (TypeError, ValueError): raise Exception( f'reference_bounds argument", "GDF], reference_height: int ) -> Union[Polygon, GDF]: \"\"\"Invert y-axis of polygon or geodataframe", "in mp] max_area_poly = mp[poly_areas.index(max(poly_areas))] df.loc[idx, 'geometry'] = max_area_poly return df def clip(df:", "118, 120, 121, 122, 123, 125, 126, 162, 170, 171, 172, 173, 174,", "scheme, e.g. {'springcereal': [1,2,3], 'wintercereal': [10,11]} col_classlabels: column with class labels. col_classids: column", "of origin, scales to pixelcoordinates. Input: ingeo: input geodataframe or shapely Polygon. reference_bounds:", "value) in enumerate(rcl_scheme.items(), 1): for v in value: rcl_dict[v] = key rcl_dict_id[v] =", "ingeo.geometry.apply(lambda _p: _p.buffer(0)) return ingeo else: return ingeo def close_holes(ingeo: Union[GDF, Polygon]) ->", "outdf = outdf.append(df_temp, ignore_index=True) outdf.reset_index(drop=True, inplace=True) return outdf def keep_biggest_poly(df: GDF) -> GDF:", "if isinstance(ingeo, Polygon): return _close_holes(ingeo) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _close_holes(_p))", "holes by limitation to the exterior ring.\"\"\" def _close_holes(poly: Polygon): if poly.interiors: return", "recommends 6 decimal places for latitude and longitude which equates to roughly 10cm", "return explode_mp(df) elif keep_biggest_poly_: return keep_biggest_poly(df) def reclassify_col(df: Union[GDF, DF], rcl_scheme: Dict, col_classlabels:", "in the same projection as the geodataframe. Args: df: input geodataframe clip_poly: Clipping", "if isinstance(ingeo, Polygon): return _invert_y_axis(poly=ingeo, reference_height=reference_height) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p:", "shapely.geometry import Polygon import rasterio.crs import geopandas as gpd from tqdm import tqdm", "ingeo def to_pixelcoords(ingeo: Union[Polygon, GDF], reference_bounds: Union[rasterio.coords.BoundingBox, tuple], scale: bool=False, nrows: int=None, ncols:", "ncols): try: minx, miny, maxx, maxy = reference_bounds w_poly, h_poly = (maxx -", "reference (e.g. image chip) in format (left, bottom, right, top) scale: Scale the", "Result polygon or geodataframe, same type as input. \"\"\" def _invert_y_axis(poly: Polygon=ingeo, reference_height=reference_height):", "geodataframe, same type as input. \"\"\" def _reduce_precision(poly: Polygon, precision: int) -> Polygon:", "numpy as np from geopandas import GeoDataFrame as GDF from pandas import DataFrame", ") -> Union[Polygon, GDF]: \"\"\"Invert y-axis of polygon or geodataframe geometries in reference", "if not all(chip_df.geometry.is_empty): chip_df = chip_df.pipe(utils.geo.to_pixelcoords, reference_bounds=chip_poly.bounds, scale=True, ncols=chip_width, nrows=chip_height) chip_df = chip_df.pipe(invert_y_axis,", "_to_pixelcoords(poly=_p, reference_bounds=reference_bounds, scale=scale, nrows=nrows, ncols=ncols)) return ingeo def invert_y_axis(ingeo: Union[Polygon, GDF], reference_height: int", "DataFrame as DF import shapely from shapely.geometry import Polygon import rasterio.crs import geopandas", "405, 406, 407, 408, 409, 410, 411, 412, 413, 415, 416, 417, 418,", "chip_df, chip_window, chip_transform, chip_poly objects. 
\"\"\" generator_window_bounds = utils.img.get_chip_windows(raster_width=raster_width, raster_height=raster_height, raster_transform=raster_transform, chip_width=chip_width, chip_height=chip_height,", "make more efficient! Args: df: input geodataframe. rcl_scheme: Reclassification scheme, e.g. {'springcereal': [1,2,3],", "to be in the same projection as the geodataframe. Args: df: input geodataframe", "polygons to the image size/resolution. Requires image array nrows and ncols parameters. nrows:", "column. # TODO: Simplify & make more efficient! Args: df: input geodataframe. rcl_scheme:", "[5, 216], 'grassland': [101, 102, 103, 104, 105, 106, 107, 108, 109, 110,", "the image size/resolution. Requires image array nrows and ncols parameters. nrows: image array", "498, 499, 501, 502, 503, 504, 505, 507, 509, 512, 513, 514, 515,", "25, 30, 31, 32, 35, 36, 40, 42, 51, 52, 53, 54, 55,", "can only use one of \"explode_mp_\" or \"keep_biggest_poly_\"!') elif explode_mp_: return explode_mp(df) elif", "row_idxs_mp: return df elif not explode_mp_ and (not keep_biggest_poly_): warnings.warn(f\"Warning, intersection resulted in", "elif explode_mp_ and keep_biggest_poly_: raise ValueError('You can only use one of \"explode_mp_\" or", "'MultiPolygon'].tolist() if not row_idxs_mp: return df elif not explode_mp_ and (not keep_biggest_poly_): warnings.warn(f\"Warning,", "p_origin elif scale is True: if ncols is None or nrows is None:", "160, 161, 280, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410,", "nrows, ncols): try: minx, miny, maxx, maxy = reference_bounds w_poly, h_poly = (maxx", "biggest polygon contained in the MultiPolygon. Returns: Result geodataframe. \"\"\" df = df[df.geometry.intersects(clip_poly)].copy()", "# Too low precision can potentially lead to invalid polygons due to line", "for x, y in zip(x_coords, y_coords)]) return p_inverted_y_axis if isinstance(ingeo, Polygon): return _invert_y_axis(poly=ingeo,", "10cm of precision (https://github.com/perrygeo/geojson-precision). Args: ingeo: input geodataframe or shapely Polygon. precision: number", "22, 57, 220, 221, 222, 223, 235], 'maize': [5, 216], 'grassland': [101, 102,", "h_poly = (maxx - minx, maxy - miny) except (TypeError, ValueError): raise Exception(", "a shapely Polygon or geodataframe geometries. GeoJSON specification recommends 6 decimal places for", "shapely.geometry.mapping(poly) geojson['coordinates'] = np.round(np.array(geojson['coordinates']), precision) poly = shapely.geometry.shape(geojson) if not poly.is_valid: # Too", "for scale. Returns: Result polygon or geodataframe, same type as input. \"\"\" def", "rows at the end of the geodataframe and resets its index. \"\"\" outdf", "Returns: Result polygon or geodataframe, same type as input. \"\"\" def _invert_y_axis(poly: Polygon=ingeo,", "to be a tuple or rasterio bounding box ' f'instance. Can be delineated", "y-axis of polygon or geodataframe geometries in reference to a bounding box e.g.", "else: continue # Drop small geometries chip_df = chip_df[chip_df.geometry.area * (10 * 10)", "= ingeo.geometry.apply(lambda _p: _to_pixelcoords(poly=_p, reference_bounds=reference_bounds, scale=scale, nrows=nrows, ncols=ncols)) return ingeo def invert_y_axis(ingeo: Union[Polygon,", "vector geodataframe to chip geometries. Filters small polygons and skips empty chips. Args:", "remain. Returns: Result polygon or geodataframe, same type as input. 
\"\"\" def _reduce_precision(poly:", "clip_poly: Clipping polygon geometry, needs to be in the same crs as the", "= shapely.geometry.mapping(poly) geojson['coordinates'] = np.round(np.array(geojson['coordinates']), precision) poly = shapely.geometry.shape(geojson) if not poly.is_valid: #", "set_crs method. \"\"\" df.crs = {'init': f'epsg:{str(epsg_code)}'} return df def explode_mp(df: GDF) ->", "216], 'grassland': [101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,", "Polygon or geodataframe geometries. GeoJSON specification recommends 6 decimal places for latitude and", "if not poly.is_valid: # Too low precision can potentially lead to invalid polygons", "elif not explode_mp_ and (not keep_biggest_poly_): warnings.warn(f\"Warning, intersection resulted in {len(row_idxs_mp)} split multipolygons.", "raster_width: rasterio meta['width'] raster_height: rasterio meta['height'] raster_transform: rasterio meta['transform'] chip_width: Desired pixel width.", "528, 529, 530, 531, 532, 533, 534, 536, 539, 540, 541, 542, 543,", "str]) -> GDF: \"\"\"Sets dataframe crs in geopandas pipeline. TODO: Deprecate with next", "geometries to be cut to chip geometries. raster_width: rasterio meta['width'] raster_height: rasterio meta['height']", "shapely.geometry.Polygon([[x - minx, y - miny] for x, y in zip(x_coords, y_coords)]) if", "210, 211, 212, 213, 214, 215, 224, 230, 234, 701, 702, 703, 704,", "DF], rcl_scheme: Dict, col_classlabels: str= 'lcsub', col_classids: str= 'lcsub_id', drop_other_classes: bool=True ) ->", "for latitude and longitude which equates to roughly 10cm of precision (https://github.com/perrygeo/geojson-precision). Args:", "in reference to a bounding box e.g. of an image chip. Usage e.g.", "return Polygon(list(poly.exterior.coords)) else: return poly if isinstance(ingeo, Polygon): return _close_holes(ingeo) elif isinstance(ingeo, GDF):", "dataframe. \"\"\" if drop_other_classes is True: classes_to_drop = [v for values in rcl_scheme.values()", "in rcl_scheme.values() for v in values] df = df[df[col_classids].isin(classes_to_drop)].copy() rcl_dict = {} rcl_dict_id", "permanent grass, # wasteland, ..) } def reduce_precision(ingeo: Union[Polygon, GDF], precision: int=3) ->", "57, 220, 221, 222, 223, 235], 'maize': [5, 216], 'grassland': [101, 102, 103,", "408, 409, 410, 411, 412, 413, 415, 416, 417, 418, 420, 421, 422,", "109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 120, 121, 122,", "182, 260, 261, 262, 263, 264, 266, 267, 268, 269, 270, 281, 282,", "as the geodataframe. Args: df: input geodataframe clip_poly: Clipping polygon geometry, needs to", "chips. Args: vector_df: Geodataframe containing the geometries to be cut to chip geometries.", "generator_window_bounds = utils.img.get_chip_windows(raster_width=raster_width, raster_height=raster_height, raster_transform=raster_transform, chip_width=chip_width, chip_height=chip_height, skip_partial_chips=True) all_chip_dfs = {} for i,", "ingeo.geometry = ingeo.geometry.apply(lambda _p: _p.buffer(0)) return ingeo else: return ingeo def close_holes(ingeo: Union[GDF,", "of reference (e.g. image chip) in format (left, bottom, right, top) scale: Scale", "Polygon: geojson = shapely.geometry.mapping(poly) geojson['coordinates'] = np.round(np.array(geojson['coordinates']), precision) poly = shapely.geometry.shape(geojson) if not", "skips empty chips. Args: vector_df: Geodataframe containing the geometries to be cut to", "latitude and longitude which equates to roughly 10cm of precision (https://github.com/perrygeo/geojson-precision). 
Args: ingeo:", "explode_mp_: Applies explode_mp function. Append dataframe rows for each polygon in potential multipolygons", "try: minx, miny, maxx, maxy = reference_bounds w_poly, h_poly = (maxx - minx,", "= df[df.geom_type == 'Polygon'] df_mp = df[df.geom_type == 'MultiPolygon'] for idx, row in", "\"\"\"Workflow to cut a vector geodataframe to chip geometries. Filters small polygons and", "minx, maxy - miny) except (TypeError, ValueError): raise Exception( f'reference_bounds argument is of", "[p.area for p in mp] max_area_poly = mp[poly_areas.index(max(poly_areas))] df.loc[idx, 'geometry'] = max_area_poly return", "115, 116, 117, 118, 120, 121, 122, 123, 125, 126, 162, 170, 171,", "i in range(len(row.geometry)): df_temp.loc[i, 'geometry'] = row.geometry[i] outdf = outdf.append(df_temp, ignore_index=True) outdf.reset_index(drop=True, inplace=True)", "efficient! Args: df: input geodataframe. rcl_scheme: Reclassification scheme, e.g. {'springcereal': [1,2,3], 'wintercereal': [10,11]}", "ncols is None or nrows is None: raise ValueError('ncols and nrows required for", "width. chip_height: Desired pixel height. first_n_chips: Only processes the first n image chips,", "Subtracts point of origin, scales to pixelcoordinates. Input: ingeo: input geodataframe or shapely", "'geometry'] = row.geometry[i] outdf = outdf.append(df_temp, ignore_index=True) outdf.reset_index(drop=True, inplace=True) return outdf def keep_biggest_poly(df:", "431, 432, 434, 440, 448, 449, 450, 487, 488, 489, 491, 493, 496,", "chip_transform, chip_poly objects. \"\"\" generator_window_bounds = utils.img.get_chip_windows(raster_width=raster_width, raster_height=raster_height, raster_transform=raster_transform, chip_width=chip_width, chip_height=chip_height, skip_partial_chips=True) all_chip_dfs", "multi-polygon geometries in a geodataframe into individual polygon geometries. Adds exploded polygons as", "125, 126, 162, 170, 171, 172, 173, 174, 180, 182, 260, 261, 262,", "tuple or rasterio bounding box ' f'instance. Can be delineated from transform, nrows,", "v in values] df = df[df[col_classids].isin(classes_to_drop)].copy() rcl_dict = {} rcl_dict_id = {} for", "ingeo.geometry.apply(lambda _p: _reduce_precision(poly=_p, precision=precision)) return ingeo def to_pixelcoords(ingeo: Union[Polygon, GDF], reference_bounds: Union[rasterio.coords.BoundingBox, tuple],", "polygons and skips empty chips. Args: vector_df: Geodataframe containing the geometries to be", "to invalid polygons due to line overlap effects. poly = poly.buffer(0) return poly", "chip_poly) in enumerate(tqdm(generator_window_bounds)): if i >= first_n_chips: break # # Clip geometry to", "geodataframe into individual polygon geometries. 
Adds exploded polygons as rows at the end", "_p: _invert_y_axis(poly=_p, reference_height=reference_height)) return ingeo def cut_chip_geometries(vector_df, raster_width, raster_height, raster_transform, chip_width=128, chip_height=128, first_n_chips=None):", "shapely.geometry.Polygon([[x, reference_height - y] for x, y in zip(x_coords, y_coords)]) return p_inverted_y_axis if", "cut_chip_geometries(vector_df, raster_width, raster_height, raster_transform, chip_width=128, chip_height=128, first_n_chips=None): \"\"\"Workflow to cut a vector geodataframe", "explode_mp_: bool = False, keep_biggest_poly_: bool = False, ) -> GDF: \"\"\"Filter and", "exploded polygons as rows at the end of the geodataframe and resets its", "402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 415,", "_p.intersection(clip_poly)) # df = gpd.overlay(df, clip_poly, how='intersection') # Slower. row_idxs_mp = df.index[df.geometry.geom_type ==", "if isinstance(ingeo, Polygon): if ingeo.is_valid is False: return ingeo.buffer(0) else: return ingeo elif", "Polygon(list(poly.exterior.coords)) else: return poly if isinstance(ingeo, Polygon): return _close_holes(ingeo) elif isinstance(ingeo, GDF): ingeo.geometry", "264, 266, 267, 268, 269, 270, 281, 282, 283, 284], 'other': [23, 24,", "into individual polygon geometries. Adds exploded polygons as rows at the end of", "type {type(reference_bounds)}, needs to be a tuple or rasterio bounding box ' f'instance.", "format (left, bottom, right, top) scale: Scale the polygons to the image size/resolution.", "number of after comma decimals of a shapely Polygon or geodataframe geometries. GeoJSON", "is True: classes_to_drop = [v for values in rcl_scheme.values() for v in values]", "df_temp.append([row] * len(row.geometry), ignore_index=True) for i in range(len(row.geometry)): df_temp.loc[i, 'geometry'] = row.geometry[i] outdf", "input. \"\"\" def _invert_y_axis(poly: Polygon=ingeo, reference_height=reference_height): x_coords, y_coords = poly.exterior.coords.xy p_inverted_y_axis = shapely.geometry.Polygon([[x,", "for p in mp] max_area_poly = mp[poly_areas.index(max(poly_areas))] df.loc[idx, 'geometry'] = max_area_poly return df", "for scale. ncols: image array ncols, required for scale. Returns: Result polygon or", "chip_name = f'COCO_train2016_000000{100000+i}' # _{clip_minX}_{clip_minY}_{clip_maxX}_{clip_maxY}' all_chip_dfs[chip_name] = {'chip_df': chip_df, 'chip_window': chip_window, 'chip_transform': chip_transform,", "'geometry'] = max_area_poly return df def clip(df: GDF, clip_poly: Polygon, explode_mp_: bool =", "clip geodataframe to clipping geometry. The clipping geometry needs to be in the", "not poly.is_valid: # Too low precision can potentially lead to invalid polygons due", "705], 'wintercereal': [10, 11, 13, 14, 15, 16, 17, 22, 57, 220, 221,", "df[col_classids].map(rcl_dict_id) return df reclass_legend = { 'springcereal': [1, 2, 3, 4, 6, 7,", "geodataframe or shapely Polygon. precision: number of after comma values that should remain.", "549, 550, 551, 552, 553, 560, 561, 563, 570, 579] # drop other", "Union[GDF, Polygon]) -> Union[GDF, Polygon]: \"\"\"Close polygon holes by limitation to the exterior", "or rows) of reference object (polygon or image, e.g. image chip. Returns: Result", "-> GDF: \"\"\"Sets dataframe crs in geopandas pipeline. TODO: Deprecate with next rasterio", "GDF: \"\"\"Replaces MultiPolygons with the biggest polygon contained in the MultiPolygon.\"\"\" row_idxs_mp =", "contained in the reclassification scheme. Returns: Result dataframe. 
\"\"\" if drop_other_classes is True:", "Polygon): return _reduce_precision(poly=ingeo, precision=precision) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _reduce_precision(poly=_p, precision=precision))", "i, (key, value) in enumerate(rcl_scheme.items(), 1): for v in value: rcl_dict[v] = key", "Args: df: input geodataframe. rcl_scheme: Reclassification scheme, e.g. {'springcereal': [1,2,3], 'wintercereal': [10,11]} col_classlabels:", "return poly if isinstance(ingeo, Polygon): return _close_holes(ingeo) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda", "col_classids: str= 'lcsub_id', drop_other_classes: bool=True ) -> Union[GDF, DF]: \"\"\"Reclassify class label and", "The clipping geometry needs to be in the same projection as the geodataframe.", "in values] df = df[df[col_classids].isin(classes_to_drop)].copy() rcl_dict = {} rcl_dict_id = {} for i,", "a vector geodataframe to chip geometries. Filters small polygons and skips empty chips.", "False in ingeo.geometry.is_valid.unique(): ingeo.geometry = ingeo.geometry.apply(lambda _p: _p.buffer(0)) return ingeo else: return ingeo", "= df_temp.append([row] * len(row.geometry), ignore_index=True) for i in range(len(row.geometry)): df_temp.loc[i, 'geometry'] = row.geometry[i]", "keep_biggest_poly(df) def reclassify_col(df: Union[GDF, DF], rcl_scheme: Dict, col_classlabels: str= 'lcsub', col_classids: str= 'lcsub_id',", "delineated from transform, nrows, ncols via rasterio.transform.reference_bounds') # Subtract point of origin of", "for each polygon in potential multipolygons that were created by the intersection. Resets", "263, 264, 266, 267, 268, 269, 270, 281, 282, 283, 284], 'other': [23,", "547, 548, 549, 550, 551, 552, 553, 560, 561, 563, 570, 579] #", "polygon or geodataframe, same type as input. 
\"\"\" def _invert_y_axis(poly: Polygon=ingeo, reference_height=reference_height): x_coords,", "# geo.py import warnings from typing import Union, Dict import numpy as np", "one of \"explode_mp_\" or \"keep_biggest_poly_\"!') elif explode_mp_: return explode_mp(df) elif keep_biggest_poly_: return keep_biggest_poly(df)", "reference_height=reference_height)) return ingeo def cut_chip_geometries(vector_df, raster_width, raster_height, raster_transform, chip_width=128, chip_height=128, first_n_chips=None): \"\"\"Workflow to", "False, keep_biggest_poly_: bool = False, ) -> GDF: \"\"\"Filter and clip geodataframe to", "def cut_chip_geometries(vector_df, raster_width, raster_height, raster_transform, chip_width=128, chip_height=128, first_n_chips=None): \"\"\"Workflow to cut a vector", "reference_bounds=reference_bounds, scale=scale, nrows=nrows, ncols=ncols) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _to_pixelcoords(poly=_p, reference_bounds=reference_bounds,", "return ingeo def close_holes(ingeo: Union[GDF, Polygon]) -> Union[GDF, Polygon]: \"\"\"Close polygon holes by", "Polygon, explode_mp_: bool = False, keep_biggest_poly_: bool = False, ) -> GDF: \"\"\"Filter", "isinstance(ingeo, Polygon): return _invert_y_axis(poly=ingeo, reference_height=reference_height) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _invert_y_axis(poly=_p,", "chip_df = chip_df.pipe(utils.geo.to_pixelcoords, reference_bounds=chip_poly.bounds, scale=True, ncols=chip_width, nrows=chip_height) chip_df = chip_df.pipe(invert_y_axis, reference_height=chip_height) else: continue", "def _close_holes(poly: Polygon): if poly.interiors: return Polygon(list(poly.exterior.coords)) else: return poly if isinstance(ingeo, Polygon):", "\"\"\"Make invalid polygons (due to self-intersection) valid by buffering with 0.\"\"\" if isinstance(ingeo,", "(10 * 10) > 5000] #5000 sqm in UTM # Transform to chip", "(not keep_biggest_poly_): warnings.warn(f\"Warning, intersection resulted in {len(row_idxs_mp)} split multipolygons. Use \" f\"explode_mp_=True or", "box e.g. of an image chip. Usage e.g. for COCOJson format. Args: ingeo:", "= df.loc[idx].geometry poly_areas = [p.area for p in mp] max_area_poly = mp[poly_areas.index(max(poly_areas))] df.loc[idx,", "_close_holes(_p)) return ingeo def set_crs(df: GDF, epsg_code: Union[int, str]) -> GDF: \"\"\"Sets dataframe", "in the MultiPolygon.\"\"\" row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist() for idx in row_idxs_mp: mp", "GDF], precision: int=3) -> Union[Polygon, GDF]: \"\"\"Reduces the number of after comma decimals", "for v in values] df = df[df[col_classids].isin(classes_to_drop)].copy() rcl_dict = {} rcl_dict_id = {}", "import shapely from shapely.geometry import Polygon import rasterio.crs import geopandas as gpd from", "not all(chip_df.geometry.is_empty): chip_df = chip_df.pipe(utils.geo.to_pixelcoords, reference_bounds=chip_poly.bounds, scale=True, ncols=chip_width, nrows=chip_height) chip_df = chip_df.pipe(invert_y_axis, reference_height=chip_height)", "df: input geodataframe clip_poly: Clipping polygon geometry, needs to be in the same", "ingeo: Input Polygon or geodataframe. reference_height: Height (in coordinates or rows) of reference", "empty chips. 
Args: vector_df: Geodataframe containing the geometries to be cut to chip", "geopandas as gpd from tqdm import tqdm import utils.img def buffer_zero(ingeo: Union[GDF, Polygon])", "from geopandas import GeoDataFrame as GDF from pandas import DataFrame as DF import", "106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,", "geometries. raster_width: rasterio meta['width'] raster_height: rasterio meta['height'] raster_transform: rasterio meta['transform'] chip_width: Desired pixel", "version that will integrate set_crs method. \"\"\" df.crs = {'init': f'epsg:{str(epsg_code)}'} return df", "containing the final chip_df, chip_window, chip_transform, chip_poly objects. \"\"\" generator_window_bounds = utils.img.get_chip_windows(raster_width=raster_width, raster_height=raster_height,", "comma values that should remain. Returns: Result polygon or geodataframe, same type as", "chip_width: Desired pixel width. chip_height: Desired pixel height. first_n_chips: Only processes the first", "Returns: Result geodataframe. \"\"\" df = df[df.geometry.intersects(clip_poly)].copy() df.geometry = df.geometry.apply(lambda _p: _p.intersection(clip_poly)) #", "processes the first n image chips, used for debugging. Returns: Dictionary containing the", "Polygon=ingeo, reference_height=reference_height): x_coords, y_coords = poly.exterior.coords.xy p_inverted_y_axis = shapely.geometry.Polygon([[x, reference_height - y] for", "df elif not explode_mp_ and (not keep_biggest_poly_): warnings.warn(f\"Warning, intersection resulted in {len(row_idxs_mp)} split", "decimal places for latitude and longitude which equates to roughly 10cm of precision", "vector_df.pipe(utils.geo.clip, clip_poly=chip_poly, keep_biggest_poly_=True) if not all(chip_df.geometry.is_empty): chip_df.geometry = chip_df.simplify(1, preserve_topology=True) else: continue #", "via rasterio.transform.reference_bounds') # Subtract point of origin of image bbox. x_coords, y_coords =", "Union[int, str]) -> GDF: \"\"\"Sets dataframe crs in geopandas pipeline. TODO: Deprecate with", "elif explode_mp_: return explode_mp(df) elif keep_biggest_poly_: return keep_biggest_poly(df) def reclassify_col(df: Union[GDF, DF], rcl_scheme:", "geopandas import GeoDataFrame as GDF from pandas import DataFrame as DF import shapely", "533, 534, 536, 539, 540, 541, 542, 543, 544, 545, 547, 548, 549,", "Union[Polygon, GDF]: \"\"\"Converts projected polygon coordinates to pixel coordinates of an image array.", "and class ids in a dataframe column. # TODO: Simplify & make more", "or rasterio bounding box ' f'instance. Can be delineated from transform, nrows, ncols", "same type as input. \"\"\" def _invert_y_axis(poly: Polygon=ingeo, reference_height=reference_height): x_coords, y_coords = poly.exterior.coords.xy", "first_n_chips: Only processes the first n image chips, used for debugging. Returns: Dictionary", "return poly if isinstance(ingeo, Polygon): return _reduce_precision(poly=ingeo, precision=precision) elif isinstance(ingeo, GDF): ingeo.geometry =", "polygon geometries. Adds exploded polygons as rows at the end of the geodataframe", "low precision can potentially lead to invalid polygons due to line overlap effects.", "precision) poly = shapely.geometry.shape(geojson) if not poly.is_valid: # Too low precision can potentially", "for debugging. Returns: Dictionary containing the final chip_df, chip_window, chip_transform, chip_poly objects. \"\"\"", "the input geodataframe. explode_mp_: Applies explode_mp function. 
Append dataframe rows for each polygon", "_to_pixelcoords(poly: Polygon, reference_bounds, scale, nrows, ncols): try: minx, miny, maxx, maxy = reference_bounds", "111, 112, 113, 114, 115, 116, 117, 118, 120, 121, 122, 123, 125,", "needs to be a tuple or rasterio bounding box ' f'instance. Can be", "df[df.geometry.intersects(clip_poly)].copy() df.geometry = df.geometry.apply(lambda _p: _p.intersection(clip_poly)) # df = gpd.overlay(df, clip_poly, how='intersection') #", "dataframe crs in geopandas pipeline. TODO: Deprecate with next rasterio version that will", "mp[poly_areas.index(max(poly_areas))] df.loc[idx, 'geometry'] = max_area_poly return df def clip(df: GDF, clip_poly: Polygon, explode_mp_:", "are not contained in the reclassification scheme. Returns: Result dataframe. \"\"\" if drop_other_classes", "origin, scales to pixelcoordinates. Input: ingeo: input geodataframe or shapely Polygon. reference_bounds: Bounding", "488, 489, 491, 493, 496, 497, 498, 499, 501, 502, 503, 504, 505,", "'MultiPolygon'].tolist() for idx in row_idxs_mp: mp = df.loc[idx].geometry poly_areas = [p.area for p", "404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 415, 416, 417,", "55, 56, 210, 211, 212, 213, 214, 215, 224, 230, 234, 701, 702,", "Polygon]: \"\"\"Close polygon holes by limitation to the exterior ring.\"\"\" def _close_holes(poly: Polygon):", "415, 416, 417, 418, 420, 421, 422, 423, 424, 429, 430, 431, 432,", "213, 214, 215, 224, 230, 234, 701, 702, 703, 704, 705], 'wintercereal': [10,", "same crs as the input geodataframe. explode_mp_: Applies explode_mp function. Append dataframe rows", "f\"explode_mp_=True or keep_biggest_poly_=True.\") return df elif explode_mp_ and keep_biggest_poly_: raise ValueError('You can only", "keep_biggest_poly_=True.\") return df elif explode_mp_ and keep_biggest_poly_: raise ValueError('You can only use one", "'springcereal': [1, 2, 3, 4, 6, 7, 21, 55, 56, 210, 211, 212,", "precision: int=3) -> Union[Polygon, GDF]: \"\"\"Reduces the number of after comma decimals of", "index. \"\"\" outdf = df[df.geom_type == 'Polygon'] df_mp = df[df.geom_type == 'MultiPolygon'] for", "ValueError): raise Exception( f'reference_bounds argument is of type {type(reference_bounds)}, needs to be a", "shapely from shapely.geometry import Polygon import rasterio.crs import geopandas as gpd from tqdm", "and clip geodataframe to clipping geometry. The clipping geometry needs to be in", "buffer_zero(ingeo: Union[GDF, Polygon]) -> Union[GDF, Polygon]: \"\"\"Make invalid polygons (due to self-intersection) valid", "mp = df.loc[idx].geometry poly_areas = [p.area for p in mp] max_area_poly = mp[poly_areas.index(max(poly_areas))]", "280, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412,", "image array. Subtracts point of origin, scales to pixelcoordinates. Input: ingeo: input geodataframe", "return shapely.affinity.scale(p_origin, xfact=x_scaler, yfact=y_scaler, origin=(0, 0, 0)) if isinstance(ingeo, Polygon): return _to_pixelcoords(poly=ingeo, reference_bounds=reference_bounds,", "516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528,", "412, 413, 415, 416, 417, 418, 420, 421, 422, 423, 424, 429, 430,", "540, 541, 542, 543, 544, 545, 547, 548, 549, 550, 551, 552, 553,", "geopandas pipeline. 
TODO: Deprecate with next rasterio version that will integrate set_crs method.", "113, 114, 115, 116, 117, 118, 120, 121, 122, 123, 125, 126, 162,", "df[df.geom_type == 'Polygon'] df_mp = df[df.geom_type == 'MultiPolygon'] for idx, row in df_mp.iterrows():", "not explode_mp_ and (not keep_biggest_poly_): warnings.warn(f\"Warning, intersection resulted in {len(row_idxs_mp)} split multipolygons. Use", "- miny) except (TypeError, ValueError): raise Exception( f'reference_bounds argument is of type {type(reference_bounds)},", "121, 122, 123, 125, 126, 162, 170, 171, 172, 173, 174, 180, 182,", "values] df = df[df[col_classids].isin(classes_to_drop)].copy() rcl_dict = {} rcl_dict_id = {} for i, (key,", "Union[Polygon, GDF]: \"\"\"Reduces the number of after comma decimals of a shapely Polygon", "scales to pixelcoordinates. Input: ingeo: input geodataframe or shapely Polygon. reference_bounds: Bounding box", "import GeoDataFrame as GDF from pandas import DataFrame as DF import shapely from", "df[col_classids].copy().map(rcl_dict) # map name first, id second! df[f'r_{col_classids}'] = df[col_classids].map(rcl_dict_id) return df reclass_legend", "221, 222, 223, 235], 'maize': [5, 216], 'grassland': [101, 102, 103, 104, 105,", "in a dataframe column. # TODO: Simplify & make more efficient! Args: df:", "def set_crs(df: GDF, epsg_code: Union[int, str]) -> GDF: \"\"\"Sets dataframe crs in geopandas", "return keep_biggest_poly(df) def reclassify_col(df: Union[GDF, DF], rcl_scheme: Dict, col_classlabels: str= 'lcsub', col_classids: str=", "= df.geometry.apply(lambda _p: _p.intersection(clip_poly)) # df = gpd.overlay(df, clip_poly, how='intersection') # Slower. row_idxs_mp", "212, 213, 214, 215, 224, 230, 234, 701, 702, 703, 704, 705], 'wintercereal':", "top) scale: Scale the polygons to the image size/resolution. Requires image array nrows", "rcl_dict_id[v] = i df[f'r_{col_classlabels}'] = df[col_classids].copy().map(rcl_dict) # map name first, id second! df[f'r_{col_classids}']", "Input: ingeo: input geodataframe or shapely Polygon. reference_bounds: Bounding box object or tuple", "needs to be in the same projection as the geodataframe. Args: df: input", "bottom, right, top) scale: Scale the polygons to the image size/resolution. Requires image", "effects. poly = poly.buffer(0) return poly if isinstance(ingeo, Polygon): return _reduce_precision(poly=ingeo, precision=precision) elif", "mp] max_area_poly = mp[poly_areas.index(max(poly_areas))] df.loc[idx, 'geometry'] = max_area_poly return df def clip(df: GDF,", "'wintercereal': [10,11]} col_classlabels: column with class labels. col_classids: column with class ids. drop_other_classes:", "image size/resolution. Requires image array nrows and ncols parameters. nrows: image array nrows,", "reclass_legend = { 'springcereal': [1, 2, 3, 4, 6, 7, 21, 55, 56,", "282, 283, 284], 'other': [23, 24, 25, 30, 31, 32, 35, 36, 40,", "ncols=ncols)) return ingeo def invert_y_axis(ingeo: Union[Polygon, GDF], reference_height: int ) -> Union[Polygon, GDF]:", "preserve_topology=True) else: continue # Drop small geometries chip_df = chip_df[chip_df.geometry.area * (10 *", "with class ids. 
drop_other_classes: Drop classes that are not contained in the reclassification", "from tqdm import tqdm import utils.img def buffer_zero(ingeo: Union[GDF, Polygon]) -> Union[GDF, Polygon]:", "geo.py import warnings from typing import Union, Dict import numpy as np from", "isinstance(ingeo, Polygon): return _reduce_precision(poly=ingeo, precision=precision) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _reduce_precision(poly=_p,", "\"\"\" df.crs = {'init': f'epsg:{str(epsg_code)}'} return df def explode_mp(df: GDF) -> GDF: \"\"\"Explode", "the reclassification scheme. Returns: Result dataframe. \"\"\" if drop_other_classes is True: classes_to_drop =", "scale, nrows, ncols): try: minx, miny, maxx, maxy = reference_bounds w_poly, h_poly =", "chip_df.simplify(1, preserve_topology=True) else: continue # Drop small geometries chip_df = chip_df[chip_df.geometry.area * (10", "523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 536,", "all_chip_dfs = {} for i, (chip_window, chip_transform, chip_poly) in enumerate(tqdm(generator_window_bounds)): if i >=", "dataframe index! keep_biggest_poly_: Applies keep_biggest_poly function. Replaces MultiPolygons with the biggest polygon contained", "tqdm import tqdm import utils.img def buffer_zero(ingeo: Union[GDF, Polygon]) -> Union[GDF, Polygon]: \"\"\"Make", "TODO: Deprecate with next rasterio version that will integrate set_crs method. \"\"\" df.crs", "same type as input. \"\"\" def _to_pixelcoords(poly: Polygon, reference_bounds, scale, nrows, ncols): try:", "x_scaler = ncols / w_poly y_scaler = nrows / h_poly return shapely.affinity.scale(p_origin, xfact=x_scaler,", "# map name first, id second! df[f'r_{col_classids}'] = df[col_classids].map(rcl_dict_id) return df reclass_legend =", "ingeo: input geodataframe or shapely Polygon. reference_bounds: Bounding box object or tuple of", "be a tuple or rasterio bounding box ' f'instance. Can be delineated from", "ignore_index=True) outdf.reset_index(drop=True, inplace=True) return outdf def keep_biggest_poly(df: GDF) -> GDF: \"\"\"Replaces MultiPolygons with", "skip_partial_chips=True) all_chip_dfs = {} for i, (chip_window, chip_transform, chip_poly) in enumerate(tqdm(generator_window_bounds)): if i", "550, 551, 552, 553, 560, 561, 563, 570, 579] # drop other non-crop", "precision=precision)) return ingeo def to_pixelcoords(ingeo: Union[Polygon, GDF], reference_bounds: Union[rasterio.coords.BoundingBox, tuple], scale: bool=False, nrows:", "resets its index. \"\"\" outdf = df[df.geom_type == 'Polygon'] df_mp = df[df.geom_type ==", "after comma decimals of a shapely Polygon or geodataframe geometries. 
GeoJSON specification recommends", "{'init': f'epsg:{str(epsg_code)}'} return df def explode_mp(df: GDF) -> GDF: \"\"\"Explode all multi-polygon geometries", "_reduce_precision(poly=ingeo, precision=precision) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _reduce_precision(poly=_p, precision=precision)) return ingeo", "specification recommends 6 decimal places for latitude and longitude which equates to roughly", "-> GDF: \"\"\"Explode all multi-polygon geometries in a geodataframe into individual polygon geometries.", "buffering with 0.\"\"\" if isinstance(ingeo, Polygon): if ingeo.is_valid is False: return ingeo.buffer(0) else:", "shapely.geometry.shape(geojson) if not poly.is_valid: # Too low precision can potentially lead to invalid", "w_poly, h_poly = (maxx - minx, maxy - miny) except (TypeError, ValueError): raise", "# Transform to chip pixelcoordinates and invert y-axis for COCO format. if not", "(https://github.com/perrygeo/geojson-precision). Args: ingeo: input geodataframe or shapely Polygon. precision: number of after comma", "function. Append dataframe rows for each polygon in potential multipolygons that were created", "411, 412, 413, 415, 416, 417, 418, 420, 421, 422, 423, 424, 429,", "ingeo def invert_y_axis(ingeo: Union[Polygon, GDF], reference_height: int ) -> Union[Polygon, GDF]: \"\"\"Invert y-axis", "ingeo.buffer(0) else: return ingeo elif isinstance(ingeo, GDF): if False in ingeo.geometry.is_valid.unique(): ingeo.geometry =", "# wasteland, ..) } def reduce_precision(ingeo: Union[Polygon, GDF], precision: int=3) -> Union[Polygon, GDF]:", "ingeo.is_valid is False: return ingeo.buffer(0) else: return ingeo elif isinstance(ingeo, GDF): if False", "debugging. Returns: Dictionary containing the final chip_df, chip_window, chip_transform, chip_poly objects. \"\"\" generator_window_bounds", "Filters small polygons and skips empty chips. Args: vector_df: Geodataframe containing the geometries", "reference_bounds w_poly, h_poly = (maxx - minx, maxy - miny) except (TypeError, ValueError):", "dataframe column. # TODO: Simplify & make more efficient! Args: df: input geodataframe.", "scale. Returns: Result polygon or geodataframe, same type as input. \"\"\" def _to_pixelcoords(poly:", "y in zip(x_coords, y_coords)]) return p_inverted_y_axis if isinstance(ingeo, Polygon): return _invert_y_axis(poly=ingeo, reference_height=reference_height) elif", "image chips, used for debugging. Returns: Dictionary containing the final chip_df, chip_window, chip_transform,", "else: return poly if isinstance(ingeo, Polygon): return _close_holes(ingeo) elif isinstance(ingeo, GDF): ingeo.geometry =", "102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,", "row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist() for idx in row_idxs_mp: mp = df.loc[idx].geometry poly_areas", "geodataframe. reference_height: Height (in coordinates or rows) of reference object (polygon or image,", "= df[df.geom_type == 'MultiPolygon'] for idx, row in df_mp.iterrows(): df_temp = gpd.GeoDataFrame(columns=df_mp.columns) df_temp", "geodataframe clip_poly: Clipping polygon geometry, needs to be in the same crs as", "roughly 10cm of precision (https://github.com/perrygeo/geojson-precision). Args: ingeo: input geodataframe or shapely Polygon. 
precision:", "520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532,", "for i, (key, value) in enumerate(rcl_scheme.items(), 1): for v in value: rcl_dict[v] =", "isinstance(ingeo, Polygon): if ingeo.is_valid is False: return ingeo.buffer(0) else: return ingeo elif isinstance(ingeo,", "or \"keep_biggest_poly_\"!') elif explode_mp_: return explode_mp(df) elif keep_biggest_poly_: return keep_biggest_poly(df) def reclassify_col(df: Union[GDF,", "function. Replaces MultiPolygons with the biggest polygon contained in the MultiPolygon. Returns: Result", "else: return ingeo def close_holes(ingeo: Union[GDF, Polygon]) -> Union[GDF, Polygon]: \"\"\"Close polygon holes", "scheme. Returns: Result dataframe. \"\"\" if drop_other_classes is True: classes_to_drop = [v for", "xfact=x_scaler, yfact=y_scaler, origin=(0, 0, 0)) if isinstance(ingeo, Polygon): return _to_pixelcoords(poly=ingeo, reference_bounds=reference_bounds, scale=scale, nrows=nrows,", "invert y-axis for COCO format. if not all(chip_df.geometry.is_empty): chip_df = chip_df.pipe(utils.geo.to_pixelcoords, reference_bounds=chip_poly.bounds, scale=True,", "related classes (forest related, environment, recreation, other grass, permanent grass, # wasteland, ..)", "Applies keep_biggest_poly function. Replaces MultiPolygons with the biggest polygon contained in the MultiPolygon.", "chip_height=128, first_n_chips=None): \"\"\"Workflow to cut a vector geodataframe to chip geometries. Filters small", "(chip_window, chip_transform, chip_poly) in enumerate(tqdm(generator_window_bounds)): if i >= first_n_chips: break # # Clip", "[23, 24, 25, 30, 31, 32, 35, 36, 40, 42, 51, 52, 53,", "decimals of a shapely Polygon or geodataframe geometries. GeoJSON specification recommends 6 decimal", "outdf = df[df.geom_type == 'Polygon'] df_mp = df[df.geom_type == 'MultiPolygon'] for idx, row", "401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413,", "Args: ingeo: Input Polygon or geodataframe. reference_height: Height (in coordinates or rows) of", "sqm in UTM # Transform to chip pixelcoordinates and invert y-axis for COCO", "in geopandas pipeline. TODO: Deprecate with next rasterio version that will integrate set_crs", "GDF]: \"\"\"Reduces the number of after comma decimals of a shapely Polygon or", "return _invert_y_axis(poly=ingeo, reference_height=reference_height) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p: _invert_y_axis(poly=_p, reference_height=reference_height)) return", "nrows / h_poly return shapely.affinity.scale(p_origin, xfact=x_scaler, yfact=y_scaler, origin=(0, 0, 0)) if isinstance(ingeo, Polygon):", "poly.exterior.coords.xy p_origin = shapely.geometry.Polygon([[x - minx, y - miny] for x, y in", "polygon or geodataframe, same type as input. \"\"\" def _to_pixelcoords(poly: Polygon, reference_bounds, scale,", "rcl_dict = {} rcl_dict_id = {} for i, (key, value) in enumerate(rcl_scheme.items(), 1):", "outdf def keep_biggest_poly(df: GDF) -> GDF: \"\"\"Replaces MultiPolygons with the biggest polygon contained", "array ncols, required for scale. 
Returns: Result polygon or geodataframe, same type as", "return df def clip(df: GDF, clip_poly: Polygon, explode_mp_: bool = False, keep_biggest_poly_: bool", "import Polygon import rasterio.crs import geopandas as gpd from tqdm import tqdm import", "minx, miny, maxx, maxy = reference_bounds w_poly, h_poly = (maxx - minx, maxy", "270, 281, 282, 283, 284], 'other': [23, 24, 25, 30, 31, 32, 35,", "Transform to chip pixelcoordinates and invert y-axis for COCO format. if not all(chip_df.geometry.is_empty):", "crs in geopandas pipeline. TODO: Deprecate with next rasterio version that will integrate", "map name first, id second! df[f'r_{col_classids}'] = df[col_classids].map(rcl_dict_id) return df reclass_legend = {", "raster_height: rasterio meta['height'] raster_transform: rasterio meta['transform'] chip_width: Desired pixel width. chip_height: Desired pixel", "GDF], reference_bounds: Union[rasterio.coords.BoundingBox, tuple], scale: bool=False, nrows: int=None, ncols: int=None ) -> Union[Polygon,", "intersection. Resets the dataframe index! keep_biggest_poly_: Applies keep_biggest_poly function. Replaces MultiPolygons with the", "rcl_dict[v] = key rcl_dict_id[v] = i df[f'r_{col_classlabels}'] = df[col_classids].copy().map(rcl_dict) # map name first,", "ring.\"\"\" def _close_holes(poly: Polygon): if poly.interiors: return Polygon(list(poly.exterior.coords)) else: return poly if isinstance(ingeo,", "return p_origin elif scale is True: if ncols is None or nrows is", "\"keep_biggest_poly_\"!') elif explode_mp_: return explode_mp(df) elif keep_biggest_poly_: return keep_biggest_poly(df) def reclassify_col(df: Union[GDF, DF],", "Drop classes that are not contained in the reclassification scheme. Returns: Result dataframe.", "the same crs as the input geodataframe. explode_mp_: Applies explode_mp function. Append dataframe", "f'epsg:{str(epsg_code)}'} return df def explode_mp(df: GDF) -> GDF: \"\"\"Explode all multi-polygon geometries in", "image chip) in format (left, bottom, right, top) scale: Scale the polygons to", "classes that are not contained in the reclassification scheme. Returns: Result dataframe. \"\"\"", "only use one of \"explode_mp_\" or \"keep_biggest_poly_\"!') elif explode_mp_: return explode_mp(df) elif keep_biggest_poly_:", "* 10) > 5000] #5000 sqm in UTM # Transform to chip pixelcoordinates", "to pixel coordinates of an image array. Subtracts point of origin, scales to", "if drop_other_classes is True: classes_to_drop = [v for values in rcl_scheme.values() for v", "\"\"\"Replaces MultiPolygons with the biggest polygon contained in the MultiPolygon.\"\"\" row_idxs_mp = df.index[df.geometry.geom_type", "_p: _p.intersection(clip_poly)) # df = gpd.overlay(df, clip_poly, how='intersection') # Slower. row_idxs_mp = df.index[df.geometry.geom_type", "box object or tuple of reference (e.g. 
image chip) in format (left, bottom,", "return df def explode_mp(df: GDF) -> GDF: \"\"\"Explode all multi-polygon geometries in a", "\"explode_mp_\" or \"keep_biggest_poly_\"!') elif explode_mp_: return explode_mp(df) elif keep_biggest_poly_: return keep_biggest_poly(df) def reclassify_col(df:", "def _invert_y_axis(poly: Polygon=ingeo, reference_height=reference_height): x_coords, y_coords = poly.exterior.coords.xy p_inverted_y_axis = shapely.geometry.Polygon([[x, reference_height -", "return _to_pixelcoords(poly=ingeo, reference_bounds=reference_bounds, scale=scale, nrows=nrows, ncols=ncols) elif isinstance(ingeo, GDF): ingeo.geometry = ingeo.geometry.apply(lambda _p:", "x_coords, y_coords = poly.exterior.coords.xy p_inverted_y_axis = shapely.geometry.Polygon([[x, reference_height - y] for x, y", "chip_width=128, chip_height=128, first_n_chips=None): \"\"\"Workflow to cut a vector geodataframe to chip geometries. Filters", "geometries in reference to a bounding box e.g. of an image chip. Usage", "= outdf.append(df_temp, ignore_index=True) outdf.reset_index(drop=True, inplace=True) return outdf def keep_biggest_poly(df: GDF) -> GDF: \"\"\"Replaces", "other non-crop related classes (forest related, environment, recreation, other grass, permanent grass, #", "object (polygon or image, e.g. image chip. Returns: Result polygon or geodataframe, same" ]
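To show how these helpers are meant to chain together in a geopandas pipeline, here is a minimal usage sketch; it is not part of the original module. The file name 'fields.geojson', the 128-pixel chip size, and the use of the layer's own total_bounds box as a stand-in clipping geometry are illustrative assumptions.

import geopandas as gpd
from shapely.geometry import box

fields = gpd.read_file('fields.geojson')  # hypothetical projected input layer

# Repair invalid geometries, close holes, and remap class ids to the legend.
fields = (fields
          .pipe(buffer_zero)
          .pipe(close_holes)
          .pipe(reclassify_col, rcl_scheme=reclass_legend,
                col_classlabels='lcsub', col_classids='lcsub_id'))

# Clip to a reference window, keeping only the biggest part of split polygons.
chip_poly = box(*fields.total_bounds)  # stand-in for a real chip footprint
chip_df = clip(fields, clip_poly=chip_poly, keep_biggest_poly_=True)

# Convert to pixel coordinates of a 128x128 chip and flip the y-axis (COCO).
chip_df = (chip_df
           .pipe(to_pixelcoords, reference_bounds=chip_poly.bounds,
                 scale=True, ncols=128, nrows=128)
           .pipe(invert_y_axis, reference_height=128))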
[ "SimpleProduct: def solution(self, value1, value2): return \"PROD = \" + str(value1 * value2)", "class SimpleProduct: def solution(self, value1, value2): return \"PROD = \" + str(value1 *" ]
[ "given nodes joined one after another. Parameters ---------- *nodes : hashable Items to", "SetItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_method_notify(self): # Test", "self.assertEqual(actual, expected) def test_set_items_method_notify(self): # Test the instance method calls the top-level function", "), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_method_notify(self): # Test the instance", "), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_metadata_method_notify_false(self): # Test the", "method calls the top-level function correctly. expr = expression.set_items().set_items(optional=True) expected = [ create_graph(", "# the conditions described in the aforementioned license. The license # is also", "test_metadata_method_notify_true(self): # Test the instance method calls the top-level function correctly. expr =", "divert in the future. top_level_trait = expression.trait method_trait = expression.ObserverExpression().trait self.assertEqual( inspect.signature(top_level_trait), inspect.signature(method_trait)", "test_list_items_optional_true(self): expr = expression.list_items(optional=True) expected = [ create_graph( ListItemObserver(notify=True, optional=True), ), ] actual", "actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method_optional_true(self): # Test the instance method calls", "Items to be attached as nodes Returns ------- ObserverGraph \"\"\" node = nodes[-1]", "all result in the same graphs expr1 = create_expression(1) expr2 = create_expression(2) combined1", "expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False) ), ] actual = expr._as_graphs() self.assertEqual(actual,", "top_level = expression.list_items method = expression.ObserverExpression().list_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionSetItem(unittest.TestCase): \"\"\"", "top-level function correctly. expr = expression.set_items().set_items(notify=False) expected = [ create_graph( SetItemObserver(notify=True, optional=False), SetItemObserver(notify=False,", "ObserverExpression.match \"\"\" def setUp(self): def anytrait(name, trait): return True self.anytrait = anytrait def", "ListItemObserver(notify=True, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_notify_false(self): expr =", "= [ ObserverGraph( node=observer1, children=[ create_graph(observer3), create_graph(observer4), ], ), ObserverGraph( node=observer2, children=[ create_graph(observer3),", "2 observer3 = 3 observer4 = 4 expr1 = create_expression(observer1) expr2 = create_expression(observer2)", "notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_call_signatures(self): # Test", "method calls the top-level function correctly. expr = expression.list_items().list_items(optional=True) expected = [ create_graph(", "if the two need to divert in the future. 
top_level = expression.dict_items method", "DictItemObserver(notify=True, optional=False), DictItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_call_signatures(self):", "expr2 combined2 = expr2 | expr1 self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1]) def test_then_operator(self): observer1 = 1", "[ create_graph( ListItemObserver(notify=True, optional=False), ListItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "= expression.list_items method = expression.ObserverExpression().list_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionSetItem(unittest.TestCase): \"\"\" Test", "= create_expression(1) self.assertEqual(expr1, expr2) def test_join_equality_with_then(self): # The following all result in the", "calls the top-level function correctly. expr = expression.set_items().set_items(notify=False) expected = [ create_graph( SetItemObserver(notify=True,", "expression.ObserverExpression().dict_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionListItem(unittest.TestCase): \"\"\" Test ObserverExpression.list_items \"\"\" def test_list_items(self):", "expr = expression.match(filter=self.anytrait, notify=False) expected = [ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=False), ), ] actual", "create_graph( DictItemObserver(notify=True, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_notify_false(self): expr", "observer2 = 2 expr1 = create_expression(observer1) expr2 = create_expression(observer2) expr = expr1 |", "expected = [ create_graph(observer1), create_graph(observer2), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_or_maintain_order(self):", "\"\"\" node = nodes[-1] graph = ObserverGraph(node=node) for node in nodes[:-1][::-1]: graph =", "= expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_optional_true(self): expr = expression.set_items(optional=True) expected = [ create_graph(", "function correctly. expr = expression.trait(\"name\").trait(\"attr\", notify=False) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False),", "notify=True, optional=True) ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method(self): # Test", "= [ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def", "test_dict_items_optional_true(self): expr = expression.dict_items(optional=True) expected = [ create_graph( DictItemObserver(notify=True, optional=True), ), ] actual", "self.assertEqual(actual, expected) def test_list_items_optional_true(self): expr = expression.list_items(optional=True) expected = [ create_graph( ListItemObserver(notify=True, optional=True),", "expr1 = create_expression(1) expr2 = create_expression(2) combined1 = expression.join(expr1, expr2) combined2 = expr1.then(expr2)", "actual = expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_method_notify(self): # Test the instance method calls", "method calls the top-level function correctly. 
expr = expression.metadata(\"bee\").metadata(\"ant\", notify=False) expected = [", "ListItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_method_optional(self): # Test", "expression.list_items(notify=False) expected = [ create_graph( ListItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual,", "instance method calls the top-level function correctly. expr = expression.dict_items().dict_items(notify=False) expected = [", "# All rights reserved. # # This software is provided without warranty under", "Test the instance method calls the top-level function correctly. expr = expression.dict_items().dict_items(notify=False) expected", "create_expression(observer3) expr4 = create_expression(observer4) expr = (expr1.then(expr2)) | (expr3.then(expr4)) expected = [ create_graph(", "expr._as_graphs() self.assertEqual(actual, expected) def test_match_notify_false(self): # Test the top-level function expr = expression.match(filter=self.anytrait,", "# Test the instance method calls the top-level function correctly. expr = expression.trait(\"name\").trait(\"attr\")", "expr4 = create_expression(observer4) expr = (expr1.then(expr2)) | (expr3.then(expr4)) expected = [ create_graph( observer1,", "= expression.ObserverExpression().list_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionSetItem(unittest.TestCase): \"\"\" Test ObserverExpression.set_items \"\"\" def", "= 2 expr1 = create_expression(observer1) expr2 = create_expression(observer2) expr = expr1.then(expr2) expected =", "expr = expression.match(filter=self.anytrait).match( filter=self.anytrait, notify=False, ) expected = [ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=True), FilteredTraitObserver(filter=self.anytrait,", "expected) def test_or_operator(self): observer1 = 1 observer2 = 2 expr1 = create_expression(observer1) expr2", "in the future. top_level_trait = expression.trait method_trait = expression.ObserverExpression().trait self.assertEqual( inspect.signature(top_level_trait), inspect.signature(method_trait) )", "= expression.ObserverExpression().metadata self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionTrait(unittest.TestCase): \"\"\" Test ObserverExpression.trait \"\"\" def", "DictItemObserver(notify=True, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_notify_false(self): expr =", "under the terms of the BSD # license included in LICENSE.txt and may", "def test_dict_items_notify_false(self): expr = expression.dict_items(notify=False) expected = [ create_graph( DictItemObserver(notify=False, optional=False), ), ]", "top_level_trait = expression.trait method_trait = expression.ObserverExpression().trait self.assertEqual( inspect.signature(top_level_trait), inspect.signature(method_trait) ) class TestObserverExpressionDictItem(unittest.TestCase): \"\"\"", "\"\"\" def test_list_items(self): expr = expression.list_items() expected = [ create_graph( ListItemObserver(notify=True, optional=False), ),", "Austin, TX # All rights reserved. 
# # This software is provided without", "[ create_graph( DictItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_optional_true(self):", "expression.list_items().list_items(notify=False) expected = [ create_graph( ListItemObserver(notify=True, optional=False), ListItemObserver(notify=False, optional=False), ), ] actual =", "), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method(self): # Test the instance", "create_graph(observer4), ], ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_join_expressions(self): observer1 =", "= 1 observer2 = 2 observer3 = 3 observer4 = 4 expr1 =", "expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_notify_false(self): expr = expression.set_items(notify=False) expected = [ create_graph( SetItemObserver(notify=False,", "traits.observation import expression from traits.observation._dict_item_observer import DictItemObserver from traits.observation._filtered_trait_observer import FilteredTraitObserver from traits.observation._list_item_observer", "test_metadata_notify_true(self): # Test the top-level function expr = expression.metadata(\"butterfly\") expected = [ create_graph(", "top_level = expression.metadata method = expression.ObserverExpression().metadata self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionTrait(unittest.TestCase): \"\"\"", "| expr2).then(expr3 | expr4) expected = [ ObserverGraph( node=observer1, children=[ create_graph(observer3), create_graph(observer4), ],", "= expression.list_items() expected = [ create_graph( ListItemObserver(notify=True, optional=False), ), ] actual = expr._as_graphs()", "the terms of the BSD # license included in LICENSE.txt and may be", "expr2 = create_expression(observer2) expr = expression.join(expr1, expr2) expected = [ create_graph( observer1, observer2,", "import SetItemObserver from traits.observation._observer_graph import ObserverGraph def create_graph(*nodes): \"\"\" Create an ObserverGraph with", "instance method calls the top-level function correctly. expr = expression.set_items().set_items(optional=True) expected = [", "= [ create_graph( observer1, observer2, ) ] actual = expr._as_graphs() self.assertEqual(actual, expected) class", "def test_or_then_chained(self): observer1 = 1 observer2 = 2 observer3 = 3 observer4 =", "= create_expression(observer2) expr = expr1.then(expr2) expected = [ create_graph( observer1, observer2, ) ]", "traits.observation._set_item_observer import SetItemObserver from traits.observation._observer_graph import ObserverGraph def create_graph(*nodes): \"\"\" Create an ObserverGraph", "TestObserverExpressionDictItem(unittest.TestCase): \"\"\" Test ObserverExpression.dict_items \"\"\" def test_dict_items(self): expr = expression.dict_items() expected = [", "self.assertEqual(actual, expected) def test_trait_method_notify_false(self): # Test the instance method calls the top-level function", "test_list_items_method_notify(self): # Test the instance method calls the top-level function correctly. expr =", "), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_method_notify(self): # Test the instance", "the conditions described in the aforementioned license. The license # is also available", "if the two need to divert in the future. 
top_level = expression.match method", "\"\"\" Test ObserverExpression.list_items \"\"\" def test_list_items(self): expr = expression.list_items() expected = [ create_graph(", "top-level function expr = expression.match(filter=self.anytrait) expected = [ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=True), ), ]", "Test the top-level function expr = expression.trait(\"name\", notify=False) expected = [ create_graph( NamedTraitObserver(name=\"name\",", "def anytrait(name, trait): return True self.anytrait = anytrait def test_match_notify_true(self): # Test the", "SetItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_optional_true(self): expr =", "| (expr3.then(expr4)) expected = [ create_graph( observer1, observer2, ), create_graph( observer3, observer4, ),", "class TestObserverExpressionSetItem(unittest.TestCase): \"\"\" Test ObserverExpression.set_items \"\"\" def test_set_items(self): expr = expression.set_items() expected =", "= expression.metadata method = expression.ObserverExpression().metadata self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionTrait(unittest.TestCase): \"\"\" Test", "test_set_items_method_notify(self): # Test the instance method calls the top-level function correctly. expr =", "expected = [ create_graph( ListItemObserver(notify=True, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "test_join_equality_with_then(self): # The following all result in the same graphs expr1 = create_expression(1)", "top-level function expr = expression.trait(\"name\", notify=False) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=False, optional=False)", "test_trait_name_optional_true(self): # Test the top-level function expr = expression.trait(\"name\", optional=True) expected = [", "online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought open source! import inspect", "inspect.signature(method_trait) ) class TestObserverExpressionDictItem(unittest.TestCase): \"\"\" Test ObserverExpression.dict_items \"\"\" def test_dict_items(self): expr = expression.dict_items()", "method calls the top-level function correctly. expr = expression.list_items().list_items(notify=False) expected = [ create_graph(", "expression with a dummy observer for testing purposes. Parameters ---------- observer : hashable", "be used as a node on ObserverGraph Returns ------- expression : ObserverExpression \"\"\"", "SetItemObserver(notify=True, optional=False), SetItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_method_optional(self):", "\"\"\" def test_set_items(self): expr = expression.set_items() expected = [ create_graph( SetItemObserver(notify=True, optional=False), ),", "expr = expr1 | expr2 expected = [ create_graph(observer1), create_graph(observer2), ] actual =", "top-level function correctly. expr = expression.match(filter=self.anytrait).match( filter=self.anytrait ) expected = [ create_graph( FilteredTraitObserver(filter=self.anytrait,", "top-level function expr = expression.match(filter=self.anytrait, notify=False) expected = [ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=False), ),", "in the future. 
top_level = expression.match method = expression.ObserverExpression().match self.assertEqual( inspect.signature(top_level), inspect.signature(method) )", "expected = [ create_graph( DictItemObserver(notify=True, optional=False), DictItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs()", "create_graph( ListItemObserver(notify=True, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_notify_false(self): expr", "def test_trait_method_optional_true(self): # Test the instance method calls the top-level function correctly. expr", "need to divert in the future. top_level = expression.dict_items method = expression.ObserverExpression().dict_items self.assertEqual(", "notify=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_match_method_notify_true(self): # Test the", "notify=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_match_method_notify_false(self): # Test the", "= expression.set_items(optional=True) expected = [ create_graph( SetItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs()", "expression.ObserverExpression().match self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionFilterMetadata(unittest.TestCase): \"\"\" Test ObserverExpression.metadata \"\"\" def test_metadata_notify_true(self):", "also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought open source!", "= expression.metadata(\"butterfly\") expected = [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"butterfly\"), notify=True, ), ), ] actual", "), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_metadata_notify_false(self): # Test the", "expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_method_optional(self): # Test the instance method calls the top-level", "(expr1 | expr2).then(expr3 | expr4) expected = [ ObserverGraph( node=observer1, children=[ create_graph(observer3), create_graph(observer4),", "[ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_match_method_notify_true(self):", "= 2 observer3 = 3 observer4 = 4 expr1 = create_expression(observer1) expr2 =", "= expression.set_items method = expression.ObserverExpression().set_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionEquality(unittest.TestCase): \"\"\" Test", "ObserverGraph(node=node) for node in nodes[:-1][::-1]: graph = ObserverGraph(node=node, children=[graph]) return graph def create_expression(observer):", "self.assertEqual(actual, expected) def test_or_maintain_order(self): # Test __or__ will maintain the order provided by", "= [ create_graph( ListItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def", "top-level function correctly. expr = expression.metadata(\"bee\").metadata(\"ant\") expected = [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"bee\"), notify=True,", "def test_match_method_notify_false(self): # Test the instance method calls the top-level function correctly. 
expr", "create_graph(observer3), create_graph(observer4), ], ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_join_expressions(self): observer1", "expression.dict_items().dict_items(notify=False) expected = [ create_graph( DictItemObserver(notify=True, optional=False), DictItemObserver(notify=False, optional=False), ), ] actual =", "= 4 expr1 = create_expression(observer1) expr2 = create_expression(observer2) expr3 = create_expression(observer3) expr4 =", "create_graph(observer2), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_or_maintain_order(self): # Test __or__ will", "# Remove this if the two need to divert in the future. top_level_trait", "= [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"butterfly\"), notify=True, ), ), ] actual = expr._as_graphs() self.assertEqual(actual,", "calls the top-level function correctly. expr = expression.metadata(\"bee\").metadata(\"ant\") expected = [ create_graph( FilteredTraitObserver(", "actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_name_notify_false(self): # Test the top-level function expr", "the instance method calls the top-level function correctly. expr = expression.set_items().set_items(notify=False) expected =", "Enthought, Inc., Austin, TX # All rights reserved. # # This software is", "graph def create_expression(observer): \"\"\" Create an expression with a dummy observer for testing", "def setUp(self): def anytrait(name, trait): return True self.anytrait = anytrait def test_match_notify_true(self): #", "= expr1 | expr2 expected = [ create_graph(observer1), create_graph(observer2), ] actual = expr._as_graphs()", "function signatures in-sync. # Remove this if the two need to divert in", "the top-level function correctly. expr = expression.list_items().list_items(optional=True) expected = [ create_graph( ListItemObserver(notify=True, optional=False),", "inspect.signature(top_level_trait), inspect.signature(method_trait) ) class TestObserverExpressionDictItem(unittest.TestCase): \"\"\" Test ObserverExpression.dict_items \"\"\" def test_dict_items(self): expr =", "def test_dict_items(self): expr = expression.dict_items() expected = [ create_graph( DictItemObserver(notify=True, optional=False), ), ]", "actual = expr._as_graphs() self.assertEqual(actual, expected) def test_or_maintain_order(self): # Test __or__ will maintain the", "calls the top-level function correctly. expr = expression.dict_items().dict_items(notify=False) expected = [ create_graph( DictItemObserver(notify=True,", "= create_expression(observer2) combined1 = expr1 | expr2 combined2 = expr2 | expr1 self.assertEqual(combined1._as_graphs(),", "of ObserverExpression with generic observers.\"\"\" def test_new_with_branches(self): observer = 1 expr = create_expression(observer)", "# Test the top-level function expr = expression.trait(\"name\", notify=False) expected = [ create_graph(", "future. top_level = expression.dict_items method = expression.ObserverExpression().dict_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionListItem(unittest.TestCase):", "method = expression.ObserverExpression().set_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionEquality(unittest.TestCase): \"\"\" Test ObserverExpression.__eq__ \"\"\"", "Test the instance method calls the top-level function correctly. 
expr = expression.match(filter=self.anytrait).match( filter=self.anytrait,", "NamedTraitObserver from traits.observation._set_item_observer import SetItemObserver from traits.observation._observer_graph import ObserverGraph def create_graph(*nodes): \"\"\" Create", "= [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False) ), ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "2 expr1 = create_expression(observer1) expr2 = create_expression(observer2) expr = expression.join(expr1, expr2) expected =", "developers keeping the two function signatures in-sync. # Remove this if the two", "= create_expression(observer2) expr3 = create_expression(observer3) expr4 = create_expression(observer4) expr = (expr1 | expr2).then(expr3", "), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_join_expressions(self): observer1 = 1 observer2", "], ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_join_expressions(self): observer1 = 1", "this if the two need to divert in the future. top_level_trait = expression.trait", "class TestObserverExpressionTrait(unittest.TestCase): \"\"\" Test ObserverExpression.trait \"\"\" def test_trait_name(self): # Test the top-level function", "create_expression(1) expr2 = create_expression(1) self.assertEqual(expr1, expr2) def test_join_equality_with_then(self): # The following all result", "self.assertEqual(actual, expected) def test_trait_name_notify_false(self): # Test the top-level function expr = expression.trait(\"name\", notify=False)", "expected) def test_list_items_method_optional(self): # Test the instance method calls the top-level function correctly.", "expected) def test_metadata_method_notify_true(self): # Test the instance method calls the top-level function correctly.", "expected) def test_trait_method(self): # Test the instance method calls the top-level function correctly.", "nodes Returns ------- ObserverGraph \"\"\" node = nodes[-1] graph = ObserverGraph(node=node) for node", "expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_method_notify(self): # Test the instance method calls the top-level", "= expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method_notify_false(self): # Test the instance method calls the", "# Test the top-level function expr = expression.metadata(\"butterfly\", notify=False) expected = [ create_graph(", "= (expr1.then(expr2)) | (expr3.then(expr4)) expected = [ create_graph( observer1, observer2, ), create_graph( observer3,", "the two need to divert in the future. 
top_level = expression.dict_items method =", "\"\"\" Test composition of ObserverExpression with generic observers.\"\"\" def test_new_with_branches(self): observer = 1", "expr = expression.list_items() expected = [ create_graph( ListItemObserver(notify=True, optional=False), ), ] actual =", "= [ create_graph( observer1, observer2, ), create_graph( observer3, observer4, ), ] actual =", "expr = expression.metadata(\"bee\").metadata(\"ant\") expected = [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"bee\"), notify=True, ), FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"ant\"),", "), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_metadata_method_notify_true(self): # Test the instance", "= create_expression(observer2) expr = expr1 | expr2 expected = [ create_graph(observer1), create_graph(observer2), ]", "create_expression(observer1) expr2 = create_expression(observer2) expr3 = create_expression(observer3) expr4 = create_expression(observer4) expr = (expr1", "# Test to help developers keeping the two function signatures in-sync. # Remove", "= expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_method_notify(self): # Test the instance method calls the", ": ObserverExpression \"\"\" return expression.SingleObserverExpression(observer) class TestObserverExpressionComposition(unittest.TestCase): \"\"\" Test composition of ObserverExpression with", "top_level = expression.dict_items method = expression.ObserverExpression().dict_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionListItem(unittest.TestCase): \"\"\"", "class TestObserverExpressionFilterMetadata(unittest.TestCase): \"\"\" Test ObserverExpression.metadata \"\"\" def test_metadata_notify_true(self): # Test the top-level function", ") expected = [ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=True), FilteredTraitObserver(filter=self.anytrait, notify=False), ), ] actual =", "= expression.set_items().set_items(notify=False) expected = [ create_graph( SetItemObserver(notify=True, optional=False), SetItemObserver(notify=False, optional=False), ), ] actual", "expr = expression.set_items() expected = [ create_graph( SetItemObserver(notify=True, optional=False), ), ] actual =", "def test_trait_name(self): # Test the top-level function expr = expression.trait(\"name\") expected = [", "future. 
top_level = expression.set_items method = expression.ObserverExpression().set_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionEquality(unittest.TestCase):", "TestObserverExpressionTrait(unittest.TestCase): \"\"\" Test ObserverExpression.trait \"\"\" def test_trait_name(self): # Test the top-level function expr", "Test the top-level function expr = expression.trait(\"name\") expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True,", "= expression.ObserverExpression().trait self.assertEqual( inspect.signature(top_level_trait), inspect.signature(method_trait) ) class TestObserverExpressionDictItem(unittest.TestCase): \"\"\" Test ObserverExpression.dict_items \"\"\" def", "node=observer1, children=[ create_graph(observer3), create_graph(observer4), ], ), ObserverGraph( node=observer2, children=[ create_graph(observer3), create_graph(observer4), ], ),", "= [ create_graph( SetItemObserver(notify=True, optional=False), SetItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual,", "= expr._as_graphs() self.assertEqual(actual, expected) def test_or_then_chained(self): observer1 = 1 observer2 = 2 observer3", "), ObserverGraph( node=observer2, children=[ create_graph(observer3), create_graph(observer4), ], ), ] actual = expr._as_graphs() self.assertEqual(actual,", "optional=False), ListItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_call_signatures(self): #", "= [ create_graph( SetItemObserver(notify=True, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def", "def test_list_items_notify_false(self): expr = expression.list_items(notify=False) expected = [ create_graph( ListItemObserver(notify=False, optional=False), ), ]", "terms of the BSD # license included in LICENSE.txt and may be redistributed", "] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_match_method_notify_false(self): # Test the instance method", "instance method calls the top-level function correctly. expr = expression.match(filter=self.anytrait).match( filter=self.anytrait ) expected", "license # is also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using", "ObserverGraph Returns ------- expression : ObserverExpression \"\"\" return expression.SingleObserverExpression(observer) class TestObserverExpressionComposition(unittest.TestCase): \"\"\" Test", "expression.metadata(\"butterfly\") expected = [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"butterfly\"), notify=True, ), ), ] actual =", "TestObserverExpressionSetItem(unittest.TestCase): \"\"\" Test ObserverExpression.set_items \"\"\" def test_set_items(self): expr = expression.set_items() expected = [", "expected) def test_join_expressions(self): observer1 = 1 observer2 = 2 expr1 = create_expression(observer1) expr2", "NamedTraitObserver(name=\"name\", notify=True, optional=False), NamedTraitObserver(name=\"attr\", notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "1 observer2 = 2 expr1 = create_expression(observer1) expr2 = create_expression(observer2) combined1 = expr1", "create_expression(observer1) expr2 = create_expression(observer2) expr = expr1.then(expr2) expected = [ create_graph( observer1, observer2,", "calls the top-level function correctly. 
expr = expression.match(filter=self.anytrait).match( filter=self.anytrait ) expected = [", "instance method calls the top-level function correctly. expr = expression.metadata(\"bee\").metadata(\"ant\") expected = [", "expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=False, optional=False) ), ] actual = expr._as_graphs() self.assertEqual(actual,", "expr._as_graphs() self.assertEqual(actual, expected) def test_metadata_notify_false(self): # Test the top-level function expr = expression.metadata(\"butterfly\",", "expression.trait method_trait = expression.ObserverExpression().trait self.assertEqual( inspect.signature(top_level_trait), inspect.signature(method_trait) ) class TestObserverExpressionDictItem(unittest.TestCase): \"\"\" Test ObserverExpression.dict_items", "need to divert in the future. top_level = expression.match method = expression.ObserverExpression().match self.assertEqual(", "optional=False), ListItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_method_optional(self): #", "expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_method_optional(self): # Test the instance method calls the top-level", "This software is provided without warranty under the terms of the BSD #", "SetItemObserver(notify=True, optional=False), SetItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_call_signatures(self):", "create_expression(observer2) combined1 = expr1 | expr2 combined2 = expr2 | expr1 self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1])", "test_or_then_chained(self): observer1 = 1 observer2 = 2 observer3 = 3 observer4 = 4", "notify=True, ), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_metadata_method_notify_false(self): # Test", "expr = expression.trait(\"name\").trait(\"attr\", notify=False) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False), NamedTraitObserver(name=\"attr\", notify=False,", "DictItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_optional_true(self): expr =", "NamedTraitObserver(name=\"name\", notify=True, optional=True) ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method(self): #", "def test_set_items(self): expr = expression.set_items() expected = [ create_graph( SetItemObserver(notify=True, optional=False), ), ]", "= expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_optional_true(self): expr = expression.list_items(optional=True) expected = [ create_graph(", "two need to divert in the future. 
top_level = expression.list_items method = expression.ObserverExpression().list_items", "expected = [ create_graph( SetItemObserver(notify=True, optional=False), SetItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs()", "from traits.observation._set_item_observer import SetItemObserver from traits.observation._observer_graph import ObserverGraph def create_graph(*nodes): \"\"\" Create an", "expression.match(filter=self.anytrait).match( filter=self.anytrait, notify=False, ) expected = [ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=True), FilteredTraitObserver(filter=self.anytrait, notify=False), ),", "= [ create_graph( SetItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def", "top-level function expr = expression.trait(\"name\") expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False) ),", "expr = expression.trait(\"name\").trait(\"attr\", optional=True) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False), NamedTraitObserver(name=\"attr\", notify=True,", "correctly. expr = expression.trait(\"name\").trait(\"attr\", optional=True) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False), NamedTraitObserver(name=\"attr\",", "# is also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought", "for testing purposes. Parameters ---------- observer : hashable Item to be used as", "notify=True, optional=False), NamedTraitObserver(name=\"attr\", notify=True, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def", "top-level function expr = expression.metadata(\"butterfly\") expected = [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"butterfly\"), notify=True, ),", "instance method calls the top-level function correctly. expr = expression.trait(\"name\").trait(\"attr\") expected = [", "FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"butterfly\"), notify=False, ), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_metadata_method_notify_true(self):", "self.assertEqual( inspect.signature(top_level_trait), inspect.signature(method_trait) ) class TestObserverExpressionDictItem(unittest.TestCase): \"\"\" Test ObserverExpression.dict_items \"\"\" def test_dict_items(self): expr", "test_list_items(self): expr = expression.list_items() expected = [ create_graph( ListItemObserver(notify=True, optional=False), ), ] actual", "= expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_method_optional(self): # Test the instance method calls the", "expected) def test_or_then_chained(self): observer1 = 1 observer2 = 2 observer3 = 3 observer4", "class TestObserverExpressionDictItem(unittest.TestCase): \"\"\" Test ObserverExpression.dict_items \"\"\" def test_dict_items(self): expr = expression.dict_items() expected =", "expr2 = create_expression(1) self.assertEqual(expr1, expr2) def test_join_equality_with_then(self): # The following all result in", "nodes[:-1][::-1]: graph = ObserverGraph(node=node, children=[graph]) return graph def create_expression(observer): \"\"\" Create an expression", "in the future. 
top_level = expression.list_items method = expression.ObserverExpression().list_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) )", "expected) def test_trait_method_optional_true(self): # Test the instance method calls the top-level function correctly.", "combined2._as_graphs()[::-1]) def test_then_operator(self): observer1 = 1 observer2 = 2 expr1 = create_expression(observer1) expr2", "# Test the top-level function expr = expression.metadata(\"butterfly\") expected = [ create_graph( FilteredTraitObserver(", "expression.trait(\"name\", optional=True) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=True) ), ] actual =", "= expr1 | expr2 combined2 = expr2 | expr1 self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1]) def test_then_operator(self):", "create_graph( ListItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_optional_true(self): expr", "optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_optional_true(self): expr = expression.dict_items(optional=True)", "create_graph( DictItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_optional_true(self): expr", "self.assertEqual(actual, expected) def test_set_items_optional_true(self): expr = expression.set_items(optional=True) expected = [ create_graph( SetItemObserver(notify=True, optional=True),", "ObserverGraph \"\"\" node = nodes[-1] graph = ObserverGraph(node=node) for node in nodes[:-1][::-1]: graph", "TestObserverExpressionEquality(unittest.TestCase): \"\"\" Test ObserverExpression.__eq__ \"\"\" def test_trait_equality(self): expr1 = create_expression(1) expr2 = create_expression(1)", "NamedTraitObserver(name=\"attr\", notify=True, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method_notify_false(self): #", "expression.join(expr1, expr2) expected = [ create_graph( observer1, observer2, ) ] actual = expr._as_graphs()", "= expr._as_graphs() self.assertEqual(actual, expected) def test_join_expressions(self): observer1 = 1 observer2 = 2 expr1", "[ create_graph( observer1, observer2, ) ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_chained_then_or(self):", "= create_expression(observer3) expr4 = create_expression(observer4) expr = (expr1 | expr2).then(expr3 | expr4) expected", "divert in the future. 
top_level = expression.match method = expression.ObserverExpression().match self.assertEqual( inspect.signature(top_level), inspect.signature(method)", "expr1 self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1]) def test_then_operator(self): observer1 = 1 observer2 = 2 expr1 =", "def test_join_equality_with_then(self): # The following all result in the same graphs expr1 =", "expr._as_graphs() self.assertEqual(actual, expected) def test_trait_name_optional_true(self): # Test the top-level function expr = expression.trait(\"name\",", "create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"butterfly\"), notify=True, ), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def", "notify=True, optional=False) ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_name_notify_false(self): # Test", "Test the top-level function expr = expression.match(filter=self.anytrait) expected = [ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=True),", "# Test the top-level function expr = expression.trait(\"name\", optional=True) expected = [ create_graph(", "---------- *nodes : hashable Items to be attached as nodes Returns ------- ObserverGraph", "expected = [ create_graph( DictItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "optional=False), NamedTraitObserver(name=\"attr\", notify=True, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method_notify_false(self):", "reserved. # # This software is provided without warranty under the terms of", "the top-level function expr = expression.trait(\"name\", notify=False) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=False,", "expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_method_optional(self): # Test the instance method calls the top-level", "= create_expression(1) expr2 = create_expression(1) self.assertEqual(expr1, expr2) def test_join_equality_with_then(self): # The following all", "actual = expr._as_graphs() self.assertEqual(actual, expected) def test_match_method_notify_true(self): # Test the instance method calls", "without warranty under the terms of the BSD # license included in LICENSE.txt", "in the future. top_level = expression.set_items method = expression.ObserverExpression().set_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) )", "inspect.signature(method) ) class TestObserverExpressionTrait(unittest.TestCase): \"\"\" Test ObserverExpression.trait \"\"\" def test_trait_name(self): # Test the", "expected = [ create_graph( DictItemObserver(notify=True, optional=False), DictItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs()", "def create_graph(*nodes): \"\"\" Create an ObserverGraph with the given nodes joined one after", "self.assertEqual(actual, expected) def test_or_operator(self): observer1 = 1 observer2 = 2 expr1 = create_expression(observer1)", "notify=True), FilteredTraitObserver(filter=self.anytrait, notify=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_call_signatures(self): #", "signatures in-sync. # Remove this if the two need to divert in the", "graph = ObserverGraph(node=node, children=[graph]) return graph def create_expression(observer): \"\"\" Create an expression with", "the top-level function correctly. 
expr = expression.trait(\"name\").trait(\"attr\", notify=False) expected = [ create_graph( NamedTraitObserver(name=\"name\",", "optional=False), NamedTraitObserver(name=\"attr\", notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_call_signatures(self):", "self.assertEqual(actual, expected) def test_metadata_method_notify_true(self): # Test the instance method calls the top-level function", "[ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_match_notify_false(self):", "create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=True) ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method(self):", "expr = expression.trait(\"name\") expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False) ), ] actual", "Test ObserverExpression.match \"\"\" def setUp(self): def anytrait(name, trait): return True self.anytrait = anytrait", "function expr = expression.trait(\"name\", optional=True) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=True) ),", "# Test the instance method calls the top-level function correctly. expr = expression.metadata(\"bee\").metadata(\"ant\")", "] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_chained_then_or(self): observer1 = 1 observer2 =", "top-level function expr = expression.metadata(\"butterfly\", notify=False) expected = [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"butterfly\"), notify=False,", "expr = expression.list_items(optional=True) expected = [ create_graph( ListItemObserver(notify=True, optional=True), ), ] actual =", "function correctly. expr = expression.list_items().list_items(optional=True) expected = [ create_graph( ListItemObserver(notify=True, optional=False), ListItemObserver(notify=True, optional=True),", "actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_name_optional_true(self): # Test the top-level function expr", "on ObserverGraph Returns ------- expression : ObserverExpression \"\"\" return expression.SingleObserverExpression(observer) class TestObserverExpressionComposition(unittest.TestCase): \"\"\"", "self.assertEqual(expr1, expr2) def test_join_equality_with_then(self): # The following all result in the same graphs", "= expression.match(filter=self.anytrait).match( filter=self.anytrait ) expected = [ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=True), FilteredTraitObserver(filter=self.anytrait, notify=True), ),", "Test to help developers keeping the two function signatures in-sync. 
# Remove this", "= [ create_graph(observer), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_or_operator(self): observer1 =", "= expr._as_graphs() self.assertEqual(actual, expected) def test_match_method_notify_false(self): # Test the instance method calls the", "optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_optional_true(self): expr = expression.list_items(optional=True)", "Test ObserverExpression.set_items \"\"\" def test_set_items(self): expr = expression.set_items() expected = [ create_graph( SetItemObserver(notify=True,", "| expr2 combined2 = expr2 | expr1 self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1]) def test_then_operator(self): observer1 =", "Remove this if the two need to divert in the future. top_level =", ") class TestObserverExpressionEquality(unittest.TestCase): \"\"\" Test ObserverExpression.__eq__ \"\"\" def test_trait_equality(self): expr1 = create_expression(1) expr2", "self.assertEqual(actual, expected) def test_trait_method(self): # Test the instance method calls the top-level function", "traits.observation._list_item_observer import ListItemObserver from traits.observation._metadata_filter import MetadataFilter from traits.observation._named_trait_observer import NamedTraitObserver from traits.observation._set_item_observer", "test_new_with_branches(self): observer = 1 expr = create_expression(observer) expected = [ create_graph(observer), ] actual", "expression.join(expr1, expr2) combined2 = expr1.then(expr2) self.assertEqual(combined1, combined2) def test_equality_different_type(self): expr = create_expression(1) self.assertNotEqual(expr,", "test_trait_method_notify_false(self): # Test the instance method calls the top-level function correctly. expr =", "will maintain the order provided by the user. observer1 = 1 observer2 =", "self.assertEqual(actual, expected) def test_metadata_method_notify_false(self): # Test the instance method calls the top-level function", ") class TestObserverExpressionDictItem(unittest.TestCase): \"\"\" Test ObserverExpression.dict_items \"\"\" def test_dict_items(self): expr = expression.dict_items() expected", "2005-2021 Enthought, Inc., Austin, TX # All rights reserved. # # This software", "observers.\"\"\" def test_new_with_branches(self): observer = 1 expr = create_expression(observer) expected = [ create_graph(observer),", "\"\"\" Create an ObserverGraph with the given nodes joined one after another. 
Parameters", "] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_method_notify(self): # Test the instance method", "ObserverExpression.trait \"\"\" def test_trait_name(self): # Test the top-level function expr = expression.trait(\"name\") expected", "= expr._as_graphs() self.assertEqual(actual, expected) def test_match_notify_false(self): # Test the top-level function expr =", "test_list_items_notify_false(self): expr = expression.list_items(notify=False) expected = [ create_graph( ListItemObserver(notify=False, optional=False), ), ] actual", "[ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"butterfly\"), notify=False, ), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "class TestObserverExpressionListItem(unittest.TestCase): \"\"\" Test ObserverExpression.list_items \"\"\" def test_list_items(self): expr = expression.list_items() expected =", "= expression.set_items(notify=False) expected = [ create_graph( SetItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs()", "= expression.list_items(optional=True) expected = [ create_graph( ListItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs()", "graph = ObserverGraph(node=node) for node in nodes[:-1][::-1]: graph = ObserverGraph(node=node, children=[graph]) return graph", "= expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_notify_false(self): expr = expression.set_items(notify=False) expected = [ create_graph(", "def test_or_operator(self): observer1 = 1 observer2 = 2 expr1 = create_expression(observer1) expr2 =", "= expression.set_items().set_items(optional=True) expected = [ create_graph( SetItemObserver(notify=True, optional=False), SetItemObserver(notify=True, optional=True), ), ] actual", "expression.SingleObserverExpression(observer) class TestObserverExpressionComposition(unittest.TestCase): \"\"\" Test composition of ObserverExpression with generic observers.\"\"\" def test_new_with_branches(self):", "expected) def test_or_maintain_order(self): # Test __or__ will maintain the order provided by the", "to divert in the future. top_level = expression.set_items method = expression.ObserverExpression().set_items self.assertEqual( inspect.signature(top_level),", "method calls the top-level function correctly. expr = expression.dict_items().dict_items(optional=True) expected = [ create_graph(", "True self.anytrait = anytrait def test_match_notify_true(self): # Test the top-level function expr =", "expr = create_expression(observer) expected = [ create_graph(observer), ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "top-level function correctly. expr = expression.dict_items().dict_items(notify=False) expected = [ create_graph( DictItemObserver(notify=True, optional=False), DictItemObserver(notify=False,", "test_set_items(self): expr = expression.set_items() expected = [ create_graph( SetItemObserver(notify=True, optional=False), ), ] actual", "expr = expression.trait(\"name\", optional=True) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=True) ), ]", "the instance method calls the top-level function correctly. expr = expression.metadata(\"bee\").metadata(\"ant\") expected =", "All rights reserved. # # This software is provided without warranty under the", "top-level function correctly. 
expr = expression.match(filter=self.anytrait).match( filter=self.anytrait, notify=False, ) expected = [ create_graph(", "test_match_method_notify_false(self): # Test the instance method calls the top-level function correctly. expr =", "create_expression(observer): \"\"\" Create an expression with a dummy observer for testing purposes. Parameters", "= expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_optional_true(self): expr = expression.dict_items(optional=True) expected = [ create_graph(", "[ create_graph( NamedTraitObserver(name=\"name\", notify=False, optional=False) ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def", "method calls the top-level function correctly. expr = expression.trait(\"name\").trait(\"attr\", notify=False) expected = [", "[ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=True), FilteredTraitObserver(filter=self.anytrait, notify=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "function correctly. expr = expression.dict_items().dict_items(notify=False) expected = [ create_graph( DictItemObserver(notify=True, optional=False), DictItemObserver(notify=False, optional=False),", "create_graph( observer1, observer2, ), create_graph( observer3, observer4, ), ] actual = expr._as_graphs() self.assertEqual(actual,", "filter=MetadataFilter(metadata_name=\"bee\"), notify=True, ), FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"ant\"), notify=True, ), ), ] actual = expr._as_graphs() self.assertEqual(actual,", "expr = expression.set_items(optional=True) expected = [ create_graph( SetItemObserver(notify=True, optional=True), ), ] actual =", "] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_optional_true(self): expr = expression.set_items(optional=True) expected =", "[ create_graph( DictItemObserver(notify=True, optional=False), DictItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "expected) def test_dict_items_optional_true(self): expr = expression.dict_items(optional=True) expected = [ create_graph( DictItemObserver(notify=True, optional=True), ),", "expr._as_graphs() self.assertEqual(actual, expected) def test_or_maintain_order(self): # Test __or__ will maintain the order provided", "actual = expr._as_graphs() self.assertEqual(actual, expected) def test_metadata_method_notify_false(self): # Test the instance method calls", "FilteredTraitObserver(filter=self.anytrait, notify=True), FilteredTraitObserver(filter=self.anytrait, notify=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_match_method_notify_false(self):", "self.assertEqual(actual, expected) def test_join_expressions(self): observer1 = 1 observer2 = 2 expr1 = create_expression(observer1)", "if the two need to divert in the future. 
top_level = expression.list_items method", "= 1 observer2 = 2 expr1 = create_expression(observer1) expr2 = create_expression(observer2) expr =", "expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_method_notify(self): # Test the instance method calls the top-level", "ListItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_optional_true(self): expr =", "create_expression(observer1) expr2 = create_expression(observer2) expr = expression.join(expr1, expr2) expected = [ create_graph( observer1,", "create_expression(observer2) expr = expr1.then(expr2) expected = [ create_graph( observer1, observer2, ) ] actual", "top-level function correctly. expr = expression.list_items().list_items(optional=True) expected = [ create_graph( ListItemObserver(notify=True, optional=False), ListItemObserver(notify=True,", "expected = [ create_graph(observer), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_or_operator(self): observer1", "divert in the future. top_level = expression.list_items method = expression.ObserverExpression().list_items self.assertEqual( inspect.signature(top_level), inspect.signature(method)", "), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method_optional_true(self): # Test the instance", "[ create_graph( ListItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_method_notify(self):", "instance method calls the top-level function correctly. expr = expression.metadata(\"bee\").metadata(\"ant\", notify=False) expected =", "= expression.trait(\"name\") expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False) ), ] actual =", "need to divert in the future. top_level = expression.set_items method = expression.ObserverExpression().set_items self.assertEqual(", "test_match_method_notify_true(self): # Test the instance method calls the top-level function correctly. expr =", "divert in the future. 
def create_graph(*nodes):
    """ Create an ObserverGraph with the given nodes joined one after another.

    Parameters
    ----------
    *nodes : hashable
        Items to be attached as nodes

    Returns
    -------
    ObserverGraph
    """
    node = nodes[-1]
    graph = ObserverGraph(node=node)
    for node in nodes[:-1][::-1]:
        graph = ObserverGraph(node=node, children=[graph])
    return graph


def create_expression(observer):
    """ Create an expression with a dummy observer for testing purposes.

    Parameters
    ----------
    observer : hashable
        Item to be used as a node on ObserverGraph

    Returns
    -------
    expression : ObserverExpression
    """
    return expression.SingleObserverExpression(observer)
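
# Illustrative sketch (added for exposition, not part of the original suite;
# the ``_demo_*`` name is ours): ``create_graph`` nests nodes front-to-back,
# so a call with several nodes is shorthand for manually nested
# ``ObserverGraph`` instances.
def _demo_create_graph_nesting():
    assert create_graph(1, 2, 3) == ObserverGraph(
        node=1,
        children=[ObserverGraph(node=2, children=[ObserverGraph(node=3)])],
    )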
class TestObserverExpressionComposition(unittest.TestCase):
    """ Test composition of ObserverExpression with generic observers."""

    def test_new_with_branches(self):
        observer = 1
        expr = create_expression(observer)
        expected = [
            create_graph(observer),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_or_operator(self):
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr = expr1 | expr2
        expected = [
            create_graph(observer1),
            create_graph(observer2),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_or_maintain_order(self):
        # Test __or__ will maintain the order provided by the user.
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        combined1 = expr1 | expr2
        combined2 = expr2 | expr1
        self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1])

    def test_then_operator(self):
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr = expr1.then(expr2)
        expected = [
            create_graph(
                observer1,
                observer2,
            )
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_chained_then_or(self):
        observer1 = 1
        observer2 = 2
        observer3 = 3
        observer4 = 4
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr3 = create_expression(observer3)
        expr4 = create_expression(observer4)
        expr = (expr1.then(expr2)) | (expr3.then(expr4))
        expected = [
            create_graph(
                observer1,
                observer2,
            ),
            create_graph(
                observer3,
                observer4,
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_or_then_chained(self):
        observer1 = 1
        observer2 = 2
        observer3 = 3
        observer4 = 4
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr3 = create_expression(observer3)
        expr4 = create_expression(observer4)
        expr = (expr1 | expr2).then(expr3 | expr4)
        expected = [
            ObserverGraph(
                node=observer1,
                children=[
                    create_graph(observer3),
                    create_graph(observer4),
                ],
            ),
            ObserverGraph(
                node=observer2,
                children=[
                    create_graph(observer3),
                    create_graph(observer4),
                ],
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_join_expressions(self):
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr = expression.join(expr1, expr2)
        expected = [
            create_graph(
                observer1,
                observer2,
            )
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)
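
# Sketch of the composition rules exercised above (added for exposition, not
# part of the original module; the function name is ours): ``|`` runs
# branches in parallel, ``.then`` chains, and ``(a | b).then(c)`` attaches
# the continuation under every branch.
def _demo_composition_semantics():
    a = create_expression(1)
    b = create_expression(2)
    c = create_expression(3)
    assert (a | b)._as_graphs() == [create_graph(1), create_graph(2)]
    assert a.then(b)._as_graphs() == [create_graph(1, 2)]
    assert (a | b).then(c)._as_graphs() == [
        ObserverGraph(node=1, children=[create_graph(3)]),
        ObserverGraph(node=2, children=[create_graph(3)]),
    ]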
class TestObserverExpressionFilter(unittest.TestCase):
    """ Test ObserverExpression.match """

    def setUp(self):

        def anytrait(name, trait):
            return True

        self.anytrait = anytrait

    def test_match_notify_true(self):
        # Test the top-level function
        expr = expression.match(filter=self.anytrait)
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_match_notify_false(self):
        # Test the top-level function
        expr = expression.match(filter=self.anytrait, notify=False)
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_match_method_notify_true(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.match(filter=self.anytrait).match(
            filter=self.anytrait
        )
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=True),
                FilteredTraitObserver(filter=self.anytrait, notify=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_match_method_notify_false(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.match(filter=self.anytrait).match(
            filter=self.anytrait,
            notify=False,
        )
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=True),
                FilteredTraitObserver(filter=self.anytrait, notify=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keeping the two function signatures in-sync.
        # Remove this if the two need to divert in the future.
        top_level = expression.match
        method = expression.ObserverExpression().match
        self.assertEqual(
            inspect.signature(top_level), inspect.signature(method)
        )
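
# Sketch of the ``test_call_signatures`` pattern repeated in each class below
# (added for exposition; the names are ours).  ``inspect.signature`` on a
# *bound* method omits ``self``, which is what lets a module-level function
# and an instance method be compared directly.
def _demo_signature_comparison():
    class Greeter:
        def greet(self, name, punctuation="!"):
            return "hello " + name + punctuation

    def greet(name, punctuation="!"):
        return "hello " + name + punctuation

    assert inspect.signature(Greeter().greet) == inspect.signature(greet)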
class TestObserverExpressionFilterMetadata(unittest.TestCase):
    """ Test ObserverExpression.metadata """

    def test_metadata_notify_true(self):
        # Test the top-level function
        expr = expression.metadata("butterfly")
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="butterfly"),
                    notify=True,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_metadata_notify_false(self):
        # Test the top-level function
        expr = expression.metadata("butterfly", notify=False)
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="butterfly"),
                    notify=False,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_metadata_method_notify_true(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.metadata("bee").metadata("ant")
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="bee"),
                    notify=True,
                ),
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="ant"),
                    notify=True,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_metadata_method_notify_false(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.metadata("bee").metadata("ant", notify=False)
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="bee"),
                    notify=True,
                ),
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="ant"),
                    notify=False,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keeping the two function signatures in-sync.
        # Remove this if the two need to divert in the future.
        top_level = expression.metadata
        method = expression.ObserverExpression().metadata
        self.assertEqual(
            inspect.signature(top_level), inspect.signature(method)
        )
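
# Sketch (ours, not in the original module): judging from the expected graphs
# above, ``metadata(name)`` appears to be a convenience over ``match`` with a
# ``MetadataFilter``, so the two spellings produce equal graphs.
def _demo_metadata_as_match():
    via_metadata = expression.metadata("butterfly")
    via_match = expression.match(
        filter=MetadataFilter(metadata_name="butterfly"),
    )
    assert via_metadata._as_graphs() == via_match._as_graphs()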
class TestObserverExpressionTrait(unittest.TestCase):
    """ Test ObserverExpression.trait """

    def test_trait_name(self):
        # Test the top-level function
        expr = expression.trait("name")
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False)
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_name_notify_false(self):
        # Test the top-level function
        expr = expression.trait("name", notify=False)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=False, optional=False)
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_name_optional_true(self):
        # Test the top-level function
        expr = expression.trait("name", optional=True)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=True)
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_method(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.trait("name").trait("attr")
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False),
                NamedTraitObserver(name="attr", notify=True, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_method_notify_false(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.trait("name").trait("attr", notify=False)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False),
                NamedTraitObserver(name="attr", notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_method_optional_true(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.trait("name").trait("attr", optional=True)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False),
                NamedTraitObserver(name="attr", notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keeping the two function signatures in-sync.
        # Remove this if the two need to divert in the future.
        top_level_trait = expression.trait
        method_trait = expression.ObserverExpression().trait
        self.assertEqual(
            inspect.signature(top_level_trait),
            inspect.signature(method_trait),
        )
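
# Sketch (ours): the flags spelled out in the expected observers above are
# simply the defaults of ``trait``, so the short and the explicit forms
# build equal graphs.
def _demo_trait_defaults():
    short = expression.trait("name")
    explicit = expression.trait("name", notify=True, optional=False)
    assert short._as_graphs() == explicit._as_graphs()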
class TestObserverExpressionDictItem(unittest.TestCase):
    """ Test ObserverExpression.dict_items """

    def test_dict_items(self):
        expr = expression.dict_items()
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_notify_false(self):
        expr = expression.dict_items(notify=False)
        expected = [
            create_graph(
                DictItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_optional_true(self):
        expr = expression.dict_items(optional=True)
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_method_notify(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.dict_items().dict_items(notify=False)
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=False),
                DictItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_method_optional(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.dict_items().dict_items(optional=True)
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=False),
                DictItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keeping the two function signatures in-sync.
        # Remove this if the two need to divert in the future.
        top_level = expression.dict_items
        method = expression.ObserverExpression().dict_items
        self.assertEqual(
            inspect.signature(top_level), inspect.signature(method)
        )
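
# Sketch (ours): ``dict_items``, ``list_items`` and ``set_items`` below all
# share the same two-flag surface -- ``notify`` toggles change notification,
# and ``optional`` presumably tolerates values that are not containers of
# the expected kind.  The defaults mirror ``dict_items``:
def _demo_container_defaults():
    assert expression.dict_items()._as_graphs() == [
        create_graph(DictItemObserver(notify=True, optional=False)),
    ]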
class TestObserverExpressionListItem(unittest.TestCase):
    """ Test ObserverExpression.list_items """

    def test_list_items(self):
        expr = expression.list_items()
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_notify_false(self):
        expr = expression.list_items(notify=False)
        expected = [
            create_graph(
                ListItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_optional_true(self):
        expr = expression.list_items(optional=True)
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_method_notify(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.list_items().list_items(notify=False)
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=False),
                ListItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_method_optional(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.list_items().list_items(optional=True)
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=False),
                ListItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keeping the two function signatures in-sync.
        # Remove this if the two need to divert in the future.
        top_level = expression.list_items
        method = expression.ObserverExpression().list_items
        self.assertEqual(
            inspect.signature(top_level), inspect.signature(method)
        )
class TestObserverExpressionSetItem(unittest.TestCase):
    """ Test ObserverExpression.set_items """

    def test_set_items(self):
        expr = expression.set_items()
        expected = [
            create_graph(
                SetItemObserver(notify=True, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_set_items_notify_false(self):
        expr = expression.set_items(notify=False)
        expected = [
            create_graph(
                SetItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_set_items_optional_true(self):
        expr = expression.set_items(optional=True)
        expected = [
            create_graph(
                SetItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_set_items_method_notify(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.set_items().set_items(notify=False)
        expected = [
            create_graph(
                SetItemObserver(notify=True, optional=False),
                SetItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_set_items_method_optional(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.set_items().set_items(optional=True)
        expected = [
            create_graph(
                SetItemObserver(notify=True, optional=False),
                SetItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keeping the two function signatures in-sync.
        # Remove this if the two need to divert in the future.
        top_level = expression.set_items
        method = expression.ObserverExpression().set_items
        self.assertEqual(
            inspect.signature(top_level), inspect.signature(method)
        )
top_level_trait = expression.trait method_trait = expression.ObserverExpression().trait self.assertEqual( inspect.signature(top_level_trait), inspect.signature(method_trait) ) class TestObserverExpressionDictItem(unittest.TestCase):", "observer4 = 4 expr1 = create_expression(observer1) expr2 = create_expression(observer2) expr3 = create_expression(observer3) expr4", "import expression from traits.observation._dict_item_observer import DictItemObserver from traits.observation._filtered_trait_observer import FilteredTraitObserver from traits.observation._list_item_observer import", "return expression.SingleObserverExpression(observer) class TestObserverExpressionComposition(unittest.TestCase): \"\"\" Test composition of ObserverExpression with generic observers.\"\"\" def", "= expr1.then(expr2) expected = [ create_graph( observer1, observer2, ) ] actual = expr._as_graphs()", "] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_match_notify_false(self): # Test the top-level function", "function correctly. expr = expression.metadata(\"bee\").metadata(\"ant\", notify=False) expected = [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"bee\"), notify=True,", "= expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_method_optional(self): # Test the instance method calls the", "Test the instance method calls the top-level function correctly. expr = expression.dict_items().dict_items(optional=True) expected", "ObserverGraph with the given nodes joined one after another. Parameters ---------- *nodes :", "expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False), NamedTraitObserver(name=\"attr\", notify=True, optional=True), ), ] actual", "top-level function correctly. expr = expression.set_items().set_items(optional=True) expected = [ create_graph( SetItemObserver(notify=True, optional=False), SetItemObserver(notify=True,", "to divert in the future. 
top_level_trait = expression.trait method_trait = expression.ObserverExpression().trait self.assertEqual( inspect.signature(top_level_trait),", "optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_optional_true(self): expr = expression.set_items(optional=True)", "to be used as a node on ObserverGraph Returns ------- expression : ObserverExpression", "expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method_optional_true(self): # Test the instance method calls the top-level", ") ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_chained_then_or(self): observer1 = 1 observer2", "expected = [ create_graph( observer1, observer2, ) ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "# Test the top-level function expr = expression.match(filter=self.anytrait, notify=False) expected = [ create_graph(", "expr = expression.list_items().list_items(notify=False) expected = [ create_graph( ListItemObserver(notify=True, optional=False), ListItemObserver(notify=False, optional=False), ), ]", "= expr._as_graphs() self.assertEqual(actual, expected) def test_trait_name_optional_true(self): # Test the top-level function expr =", "FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"ant\"), notify=True, ), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_metadata_method_notify_false(self):", "), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_or_then_chained(self): observer1 = 1 observer2", "expected) def test_set_items_notify_false(self): expr = expression.set_items(notify=False) expected = [ create_graph( SetItemObserver(notify=False, optional=False), ),", "actual = expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_optional_true(self): expr = expression.set_items(optional=True) expected = [", "expression.trait(\"name\").trait(\"attr\", notify=False) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False), NamedTraitObserver(name=\"attr\", notify=False, optional=False), ),", "(expr3.then(expr4)) expected = [ create_graph( observer1, observer2, ), create_graph( observer3, observer4, ), ]", "expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=True) ), ] actual = expr._as_graphs() self.assertEqual(actual,", "expected) def test_set_items_method_notify(self): # Test the instance method calls the top-level function correctly.", "of the BSD # license included in LICENSE.txt and may be redistributed only", "Test the instance method calls the top-level function correctly. expr = expression.list_items().list_items(optional=True) expected", "= 2 expr1 = create_expression(observer1) expr2 = create_expression(observer2) expr = expr1 | expr2", "= expression.ObserverExpression().match self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionFilterMetadata(unittest.TestCase): \"\"\" Test ObserverExpression.metadata \"\"\" def", "def test_set_items_optional_true(self): expr = expression.set_items(optional=True) expected = [ create_graph( SetItemObserver(notify=True, optional=True), ), ]", "] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_metadata_method_notify_true(self): # Test the instance method", "= 1 observer2 = 2 expr1 = create_expression(observer1) expr2 = create_expression(observer2) combined1 =", "instance method calls the top-level function correctly. 
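
# Illustrative sketch, not part of the original test module (the helper name
# _demo_create_graph is ours): under the definitions above, create_graph
# joins its arguments into one linear ObserverGraph, so two nodes nest as
# parent -> child.
def _demo_create_graph():
    assert create_graph(1, 2) == ObserverGraph(
        node=1, children=[ObserverGraph(node=2)],
    )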
class TestObserverExpressionComposition(unittest.TestCase):
    """ Test composition of ObserverExpression with generic observers."""

    def test_new_with_branches(self):
        observer = 1
        expr = create_expression(observer)
        expected = [
            create_graph(observer),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_or_operator(self):
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr = expr1 | expr2
        expected = [
            create_graph(observer1),
            create_graph(observer2),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_or_maintain_order(self):
        # Test __or__ will maintain the order provided by the user.
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        combined1 = expr1 | expr2
        combined2 = expr2 | expr1
        self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1])

    def test_then_operator(self):
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr = expr1.then(expr2)
        expected = [
            create_graph(
                observer1,
                observer2,
            )
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_chained_then_or(self):
        observer1 = 1
        observer2 = 2
        observer3 = 3
        observer4 = 4
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr3 = create_expression(observer3)
        expr4 = create_expression(observer4)
        expr = (expr1.then(expr2)) | (expr3.then(expr4))
        expected = [
            create_graph(
                observer1,
                observer2,
            ),
            create_graph(
                observer3,
                observer4,
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_or_then_chained(self):
        observer1 = 1
        observer2 = 2
        observer3 = 3
        observer4 = 4
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr3 = create_expression(observer3)
        expr4 = create_expression(observer4)
        expr = (expr1 | expr2).then(expr3 | expr4)
        expected = [
            ObserverGraph(
                node=observer1,
                children=[
                    create_graph(observer3),
                    create_graph(observer4),
                ],
            ),
            ObserverGraph(
                node=observer2,
                children=[
                    create_graph(observer3),
                    create_graph(observer4),
                ],
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_join_expressions(self):
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr = expression.join(expr1, expr2)
        expected = [
            create_graph(
                observer1,
                observer2,
            )
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)
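
# Illustrative sketch, not part of the original test module (the helper name
# _demo_or_vs_then is ours): it restates the composition tests above.
# `|` yields parallel branch graphs, while `.then` chains into one graph.
def _demo_or_vs_then():
    expr1 = create_expression(1)
    expr2 = create_expression(2)
    assert (expr1 | expr2)._as_graphs() == [create_graph(1), create_graph(2)]
    assert expr1.then(expr2)._as_graphs() == [create_graph(1, 2)]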
class TestObserverExpressionFilter(unittest.TestCase):
    """ Test ObserverExpression.match """

    def setUp(self):
        def anytrait(name, trait):
            return True

        self.anytrait = anytrait

    def test_match_notify_true(self):
        # Test the top-level function
        expr = expression.match(filter=self.anytrait)
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_match_notify_false(self):
        # Test the top-level function
        expr = expression.match(filter=self.anytrait, notify=False)
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_match_method_notify_true(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.match(filter=self.anytrait).match(
            filter=self.anytrait
        )
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=True),
                FilteredTraitObserver(filter=self.anytrait, notify=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_match_method_notify_false(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.match(filter=self.anytrait).match(
            filter=self.anytrait, notify=False,
        )
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=True),
                FilteredTraitObserver(filter=self.anytrait, notify=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keep the two function signatures in sync.
        # Remove this if the two need to diverge in the future.
        top_level = expression.match
        method = expression.ObserverExpression().match
        self.assertEqual(
            inspect.signature(top_level),
            inspect.signature(method),
        )
class TestObserverExpressionFilterMetadata(unittest.TestCase):
    """ Test ObserverExpression.metadata """

    def test_metadata_notify_true(self):
        # Test the top-level function
        expr = expression.metadata("butterfly")
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="butterfly"),
                    notify=True,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_metadata_notify_false(self):
        # Test the top-level function
        expr = expression.metadata("butterfly", notify=False)
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="butterfly"),
                    notify=False,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_metadata_method_notify_true(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.metadata("bee").metadata("ant")
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="bee"),
                    notify=True,
                ),
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="ant"),
                    notify=True,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_metadata_method_notify_false(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.metadata("bee").metadata("ant", notify=False)
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="bee"),
                    notify=True,
                ),
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="ant"),
                    notify=False,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keep the two function signatures in sync.
        # Remove this if the two need to diverge in the future.
        top_level = expression.metadata
        method = expression.ObserverExpression().metadata
        self.assertEqual(
            inspect.signature(top_level),
            inspect.signature(method),
        )
class TestObserverExpressionTrait(unittest.TestCase):
    """ Test ObserverExpression.trait """

    def test_trait_name(self):
        # Test the top-level function
        expr = expression.trait("name")
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False)
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_name_notify_false(self):
        # Test the top-level function
        expr = expression.trait("name", notify=False)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=False, optional=False)
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_name_optional_true(self):
        # Test the top-level function
        expr = expression.trait("name", optional=True)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=True)
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_method(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.trait("name").trait("attr")
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False),
                NamedTraitObserver(name="attr", notify=True, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_method_notify_false(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.trait("name").trait("attr", notify=False)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False),
                NamedTraitObserver(name="attr", notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_method_optional_true(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.trait("name").trait("attr", optional=True)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False),
                NamedTraitObserver(name="attr", notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keep the two function signatures in sync.
        # Remove this if the two need to diverge in the future.
        top_level_trait = expression.trait
        method_trait = expression.ObserverExpression().trait
        self.assertEqual(
            inspect.signature(top_level_trait),
            inspect.signature(method_trait),
        )
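
# Illustrative sketch, not part of the original test module (the helper name
# _demo_trait_chaining is ours): chaining .trait calls produces a single
# graph whose nodes are NamedTraitObserver instances, one per trait name,
# mirroring test_trait_method above.
def _demo_trait_chaining():
    expr = expression.trait("name").trait("attr")
    assert expr._as_graphs() == [
        create_graph(
            NamedTraitObserver(name="name", notify=True, optional=False),
            NamedTraitObserver(name="attr", notify=True, optional=False),
        )
    ]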
class TestObserverExpressionDictItem(unittest.TestCase):
    """ Test ObserverExpression.dict_items """

    def test_dict_items(self):
        expr = expression.dict_items()
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_notify_false(self):
        expr = expression.dict_items(notify=False)
        expected = [
            create_graph(
                DictItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_optional_true(self):
        expr = expression.dict_items(optional=True)
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_method_notify(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.dict_items().dict_items(notify=False)
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=False),
                DictItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_method_optional(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.dict_items().dict_items(optional=True)
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=False),
                DictItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keep the two function signatures in sync.
        # Remove this if the two need to diverge in the future.
        top_level = expression.dict_items
        method = expression.ObserverExpression().dict_items
        self.assertEqual(
            inspect.signature(top_level),
            inspect.signature(method),
        )
class TestObserverExpressionListItem(unittest.TestCase):
    """ Test ObserverExpression.list_items """

    def test_list_items(self):
        expr = expression.list_items()
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_notify_false(self):
        expr = expression.list_items(notify=False)
        expected = [
            create_graph(
                ListItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_optional_true(self):
        expr = expression.list_items(optional=True)
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_method_notify(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.list_items().list_items(notify=False)
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=False),
                ListItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_method_optional(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.list_items().list_items(optional=True)
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=False),
                ListItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keep the two function signatures in sync.
        # Remove this if the two need to diverge in the future.
        top_level = expression.list_items
        method = expression.ObserverExpression().list_items
        self.assertEqual(
            inspect.signature(top_level),
            inspect.signature(method),
        )
NamedTraitObserver(name=\"attr\", notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_notify_false(self): expr = expression.list_items(notify=False) expected =", "another. Parameters ---------- *nodes : hashable Items to be attached as nodes Returns", "= [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False), NamedTraitObserver(name=\"attr\", notify=False, optional=False), ), ] actual =", "] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method(self): # Test the instance method", "expr1 = create_expression(observer1) expr2 = create_expression(observer2) expr3 = create_expression(observer3) expr4 = create_expression(observer4) expr", "two need to divert in the future. top_level_trait = expression.trait method_trait = expression.ObserverExpression().trait", "to divert in the future. top_level = expression.metadata method = expression.ObserverExpression().metadata self.assertEqual( inspect.signature(top_level),", "FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"ant\"), notify=False, ), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_call_signatures(self):", "= expr._as_graphs() self.assertEqual(actual, expected) def test_metadata_notify_false(self): # Test the top-level function expr =", "optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_list_items_notify_false(self): expr = expression.list_items(notify=False)", "[ create_graph( ListItemObserver(notify=True, optional=False), ListItemObserver(notify=False, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected)", "= [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"bee\"), notify=True, ), FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"ant\"), notify=True, ), ), ]", "expression.dict_items(optional=True) expected = [ create_graph( DictItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual,", "= expression.dict_items(optional=True) expected = [ create_graph( DictItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs()", "self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1]) def test_then_operator(self): observer1 = 1 observer2 = 2 expr1 = create_expression(observer1)", "inspect.signature(method) ) class TestObserverExpressionEquality(unittest.TestCase): \"\"\" Test ObserverExpression.__eq__ \"\"\" def test_trait_equality(self): expr1 = create_expression(1)", "= expression.trait(\"name\").trait(\"attr\", optional=True) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False), NamedTraitObserver(name=\"attr\", notify=True, optional=True),", "), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_set_items_method_optional(self): # Test the instance", "expr3 = create_expression(observer3) expr4 = create_expression(observer4) expr = (expr1.then(expr2)) | (expr3.then(expr4)) expected =", "expr._as_graphs() self.assertEqual(actual, expected) def test_call_signatures(self): # Test to help developers keeping the two", "by the user. 
observer1 = 1 observer2 = 2 expr1 = create_expression(observer1) expr2", "= create_expression(observer2) expr3 = create_expression(observer3) expr4 = create_expression(observer4) expr = (expr1.then(expr2)) | (expr3.then(expr4))", "expr = expression.trait(\"name\", notify=False) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=False, optional=False) ), ]", "= expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_method_optional(self): # Test the instance method calls the", "if the two need to divert in the future. top_level = expression.metadata method", "= expr._as_graphs() self.assertEqual(actual, expected) def test_call_signatures(self): # Test to help developers keeping the", "expr = expression.match(filter=self.anytrait).match( filter=self.anytrait ) expected = [ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=True), FilteredTraitObserver(filter=self.anytrait, notify=True),", "expression.metadata(\"bee\").metadata(\"ant\", notify=False) expected = [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"bee\"), notify=True, ), FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"ant\"), notify=False,", "), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_trait_method_notify_false(self): # Test the instance", "expression.set_items() expected = [ create_graph( SetItemObserver(notify=True, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual,", "combined2 = expr2 | expr1 self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1]) def test_then_operator(self): observer1 = 1 observer2", "= expression.trait(\"name\", notify=False) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=False, optional=False) ), ] actual", "unittest from traits.observation import expression from traits.observation._dict_item_observer import DictItemObserver from traits.observation._filtered_trait_observer import FilteredTraitObserver", "ListItemObserver(notify=True, optional=False), ListItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_call_signatures(self):", "expression.set_items(optional=True) expected = [ create_graph( SetItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual,", "children=[graph]) return graph def create_expression(observer): \"\"\" Create an expression with a dummy observer", "the two need to divert in the future. top_level = expression.match method =", "for using Enthought open source! import inspect import unittest from traits.observation import expression", "create_graph( DictItemObserver(notify=True, optional=False), DictItemObserver(notify=True, optional=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def", "test_match_notify_true(self): # Test the top-level function expr = expression.match(filter=self.anytrait) expected = [ create_graph(", "only under # the conditions described in the aforementioned license. The license #", "aforementioned license. 
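
# Illustrative sketch, not part of the original test module (the helper name
# _demo_set_items_flags is ours): assuming the notify and optional flags
# combine independently, both land on the single SetItemObserver node.
def _demo_set_items_flags():
    expr = expression.set_items(notify=False, optional=True)
    assert expr._as_graphs() == [
        create_graph(SetItemObserver(notify=False, optional=True)),
    ]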
The license # is also available online at http://www.enthought.com/licenses/BSD.txt # #", "notify=False) expected = [ create_graph( NamedTraitObserver(name=\"name\", notify=True, optional=False), NamedTraitObserver(name=\"attr\", notify=False, optional=False), ), ]", "traits.observation._observer_graph import ObserverGraph def create_graph(*nodes): \"\"\" Create an ObserverGraph with the given nodes", "actual = expr._as_graphs() self.assertEqual(actual, expected) class TestObserverExpressionFilter(unittest.TestCase): \"\"\" Test ObserverExpression.match \"\"\" def setUp(self):", "[ ObserverGraph( node=observer1, children=[ create_graph(observer3), create_graph(observer4), ], ), ObserverGraph( node=observer2, children=[ create_graph(observer3), create_graph(observer4),", "notify=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_match_notify_false(self): # Test the", "the top-level function correctly. expr = expression.set_items().set_items(notify=False) expected = [ create_graph( SetItemObserver(notify=True, optional=False),", "SetItemObserver from traits.observation._observer_graph import ObserverGraph def create_graph(*nodes): \"\"\" Create an ObserverGraph with the", "] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_dict_items_optional_true(self): expr = expression.dict_items(optional=True) expected =", "= expression.match(filter=self.anytrait, notify=False) expected = [ create_graph( FilteredTraitObserver(filter=self.anytrait, notify=False), ), ] actual =", "the top-level function expr = expression.metadata(\"butterfly\", notify=False) expected = [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"butterfly\"),", "function expr = expression.metadata(\"butterfly\") expected = [ create_graph( FilteredTraitObserver( filter=MetadataFilter(metadata_name=\"butterfly\"), notify=True, ), ),", "the instance method calls the top-level function correctly. expr = expression.metadata(\"bee\").metadata(\"ant\", notify=False) expected", "create_graph( FilteredTraitObserver(filter=self.anytrait, notify=True), FilteredTraitObserver(filter=self.anytrait, notify=True), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def", "method = expression.ObserverExpression().list_items self.assertEqual( inspect.signature(top_level), inspect.signature(method) ) class TestObserverExpressionSetItem(unittest.TestCase): \"\"\" Test ObserverExpression.set_items \"\"\"", "------- ObserverGraph \"\"\" node = nodes[-1] graph = ObserverGraph(node=node) for node in nodes[:-1][::-1]:", "] actual = expr._as_graphs() self.assertEqual(actual, expected) def test_metadata_method_notify_false(self): # Test the instance method", "TestObserverExpressionFilter(unittest.TestCase): \"\"\" Test ObserverExpression.match \"\"\" def setUp(self): def anytrait(name, trait): return True self.anytrait", "= [ create_graph( ListItemObserver(notify=True, optional=False), ), ] actual = expr._as_graphs() self.assertEqual(actual, expected) def", "self.assertEqual(actual, expected) def test_dict_items_method_optional(self): # Test the instance method calls the top-level function", "expr1 | expr2 expected = [ create_graph(observer1), create_graph(observer2), ] actual = expr._as_graphs() self.assertEqual(actual,", "two need to divert in the future. 
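

# Illustrative sketch (not part of the original test suite): ``create_graph``
# nests the given nodes right to left, so each earlier node becomes the sole
# parent of the next. This assumes ObserverGraph compares equal by node and
# children, which the tests below rely on.
def _demo_create_graph_sketch():
    graph = create_graph(1, 2, 3)
    assert graph == ObserverGraph(
        node=1,
        children=[ObserverGraph(node=2, children=[ObserverGraph(node=3)])],
    )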


class TestObserverExpressionComposition(unittest.TestCase):
    """ Test composition of ObserverExpression with generic observers."""

    def test_new_with_branches(self):
        observer = 1
        expr = create_expression(observer)
        expected = [
            create_graph(observer),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_or_operator(self):
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)

        expr = expr1 | expr2
        expected = [
            create_graph(observer1),
            create_graph(observer2),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_or_maintain_order(self):
        # Test __or__ will maintain the order provided by the user.
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)

        combined1 = expr1 | expr2
        combined2 = expr2 | expr1
        self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1])

    def test_then_operator(self):
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)

        expr = expr1.then(expr2)
        expected = [
            create_graph(
                observer1,
                observer2,
            )
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_chained_then_or(self):
        observer1 = 1
        observer2 = 2
        observer3 = 3
        observer4 = 4
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr3 = create_expression(observer3)
        expr4 = create_expression(observer4)

        expr = (expr1.then(expr2)) | (expr3.then(expr4))
        expected = [
            create_graph(
                observer1,
                observer2,
            ),
            create_graph(
                observer3,
                observer4,
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_or_then_chained(self):
        observer1 = 1
        observer2 = 2
        observer3 = 3
        observer4 = 4
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)
        expr3 = create_expression(observer3)
        expr4 = create_expression(observer4)

        expr = (expr1 | expr2).then(expr3 | expr4)
        expected = [
            ObserverGraph(
                node=observer1,
                children=[
                    create_graph(observer3),
                    create_graph(observer4),
                ],
            ),
            ObserverGraph(
                node=observer2,
                children=[
                    create_graph(observer3),
                    create_graph(observer4),
                ],
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_join_expressions(self):
        observer1 = 1
        observer2 = 2
        expr1 = create_expression(observer1)
        expr2 = create_expression(observer2)

        expr = expression.join(expr1, expr2)
        expected = [
            create_graph(
                observer1,
                observer2,
            )
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)
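

# Illustrative sketch (not part of the original suite): ``then`` grafts the
# right-hand expression under every leaf of the left-hand one, so an ``|``
# on the right fans out into multiple children (cf. ``test_or_then_chained``).
def _demo_composition_sketch():
    expr = create_expression(1).then(
        create_expression(2) | create_expression(3)
    )
    assert expr._as_graphs() == [
        ObserverGraph(node=1, children=[create_graph(2), create_graph(3)]),
    ]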


class TestObserverExpressionFilter(unittest.TestCase):
    """ Test ObserverExpression.match """

    def setUp(self):

        def anytrait(name, trait):
            return True

        self.anytrait = anytrait

    def test_match_notify_true(self):
        # Test the top-level function
        expr = expression.match(filter=self.anytrait)
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_match_notify_false(self):
        # Test the top-level function
        expr = expression.match(filter=self.anytrait, notify=False)
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_match_method_notify_true(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.match(filter=self.anytrait).match(
            filter=self.anytrait
        )
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=True),
                FilteredTraitObserver(filter=self.anytrait, notify=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_match_method_notify_false(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.match(filter=self.anytrait).match(
            filter=self.anytrait, notify=False,
        )
        expected = [
            create_graph(
                FilteredTraitObserver(filter=self.anytrait, notify=True),
                FilteredTraitObserver(filter=self.anytrait, notify=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keep the two function signatures in sync.
        # Remove this if the two need to diverge in the future.
        top_level = expression.match
        method = expression.ObserverExpression().match
        self.assertEqual(
            inspect.signature(top_level), inspect.signature(method)
        )


class TestObserverExpressionFilterMetadata(unittest.TestCase):
    """ Test ObserverExpression.metadata """

    def test_metadata_notify_true(self):
        # Test the top-level function
        expr = expression.metadata("butterfly")
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="butterfly"),
                    notify=True,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_metadata_notify_false(self):
        # Test the top-level function
        expr = expression.metadata("butterfly", notify=False)
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="butterfly"),
                    notify=False,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_metadata_method_notify_true(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.metadata("bee").metadata("ant")
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="bee"),
                    notify=True,
                ),
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="ant"),
                    notify=True,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_metadata_method_notify_false(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.metadata("bee").metadata("ant", notify=False)
        expected = [
            create_graph(
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="bee"),
                    notify=True,
                ),
                FilteredTraitObserver(
                    filter=MetadataFilter(metadata_name="ant"),
                    notify=False,
                ),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keep the two function signatures in sync.
        # Remove this if the two need to diverge in the future.
        top_level = expression.metadata
        method = expression.ObserverExpression().metadata
        self.assertEqual(
            inspect.signature(top_level), inspect.signature(method)
        )
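

# Illustrative sketch (not part of the original suite): ``metadata`` appears
# to be ``match`` with a MetadataFilter, so the two spellings below should
# yield the same graphs. This equivalence is extrapolated from the expected
# values in the tests above, not asserted by the original suite.
def _demo_metadata_sketch():
    via_metadata = expression.metadata("butterfly", notify=False)
    via_match = expression.match(
        filter=MetadataFilter(metadata_name="butterfly"), notify=False,
    )
    assert via_metadata._as_graphs() == via_match._as_graphs()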


class TestObserverExpressionTrait(unittest.TestCase):
    """ Test ObserverExpression.trait """

    def test_trait_name(self):
        # Test the top-level function
        expr = expression.trait("name")
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False)
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_name_notify_false(self):
        # Test the top-level function
        expr = expression.trait("name", notify=False)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=False, optional=False)
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_name_optional_true(self):
        # Test the top-level function
        expr = expression.trait("name", optional=True)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=True)
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_method(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.trait("name").trait("attr")
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False),
                NamedTraitObserver(name="attr", notify=True, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_method_notify_false(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.trait("name").trait("attr", notify=False)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False),
                NamedTraitObserver(name="attr", notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_trait_method_optional_true(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.trait("name").trait("attr", optional=True)
        expected = [
            create_graph(
                NamedTraitObserver(name="name", notify=True, optional=False),
                NamedTraitObserver(name="attr", notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keep the two function signatures in sync.
        # Remove this if the two need to diverge in the future.
        top_level_trait = expression.trait
        method_trait = expression.ObserverExpression().trait
        self.assertEqual(
            inspect.signature(top_level_trait),
            inspect.signature(method_trait),
        )


class TestObserverExpressionDictItem(unittest.TestCase):
    """ Test ObserverExpression.dict_items """

    def test_dict_items(self):
        expr = expression.dict_items()
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_notify_false(self):
        expr = expression.dict_items(notify=False)
        expected = [
            create_graph(
                DictItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_optional_true(self):
        expr = expression.dict_items(optional=True)
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_method_notify(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.dict_items().dict_items(notify=False)
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=False),
                DictItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_dict_items_method_optional(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.dict_items().dict_items(optional=True)
        expected = [
            create_graph(
                DictItemObserver(notify=True, optional=False),
                DictItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keep the two function signatures in sync.
        # Remove this if the two need to diverge in the future.
        top_level = expression.dict_items
        method = expression.ObserverExpression().dict_items
        self.assertEqual(
            inspect.signature(top_level), inspect.signature(method)
        )
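

# Illustrative sketch (not part of the original suite): the container
# observers all take the same ``notify`` and ``optional`` flags. Passing
# both in one call is an assumption extrapolated from the single-flag tests
# above, as is the resulting observer state.
def _demo_container_flags_sketch():
    expr = expression.dict_items(notify=False, optional=True)
    assert expr._as_graphs() == [
        create_graph(DictItemObserver(notify=False, optional=True)),
    ]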


class TestObserverExpressionListItem(unittest.TestCase):
    """ Test ObserverExpression.list_items """

    def test_list_items(self):
        expr = expression.list_items()
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_notify_false(self):
        expr = expression.list_items(notify=False)
        expected = [
            create_graph(
                ListItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_optional_true(self):
        expr = expression.list_items(optional=True)
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_method_notify(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.list_items().list_items(notify=False)
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=False),
                ListItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_list_items_method_optional(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.list_items().list_items(optional=True)
        expected = [
            create_graph(
                ListItemObserver(notify=True, optional=False),
                ListItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keep the two function signatures in sync.
        # Remove this if the two need to diverge in the future.
        top_level = expression.list_items
        method = expression.ObserverExpression().list_items
        self.assertEqual(
            inspect.signature(top_level), inspect.signature(method)
        )


class TestObserverExpressionSetItem(unittest.TestCase):
    """ Test ObserverExpression.set_items """

    def test_set_items(self):
        expr = expression.set_items()
        expected = [
            create_graph(
                SetItemObserver(notify=True, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_set_items_notify_false(self):
        expr = expression.set_items(notify=False)
        expected = [
            create_graph(
                SetItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_set_items_optional_true(self):
        expr = expression.set_items(optional=True)
        expected = [
            create_graph(
                SetItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_set_items_method_notify(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.set_items().set_items(notify=False)
        expected = [
            create_graph(
                SetItemObserver(notify=True, optional=False),
                SetItemObserver(notify=False, optional=False),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_set_items_method_optional(self):
        # Test the instance method calls the top-level function correctly.
        expr = expression.set_items().set_items(optional=True)
        expected = [
            create_graph(
                SetItemObserver(notify=True, optional=False),
                SetItemObserver(notify=True, optional=True),
            ),
        ]
        actual = expr._as_graphs()
        self.assertEqual(actual, expected)

    def test_call_signatures(self):
        # Test to help developers keep the two function signatures in sync.
        # Remove this if the two need to diverge in the future.
        top_level = expression.set_items
        method = expression.ObserverExpression().set_items
        self.assertEqual(
            inspect.signature(top_level), inspect.signature(method)
        )


class TestObserverExpressionEquality(unittest.TestCase):
    """ Test ObserverExpression.__eq__ """

    def test_trait_equality(self):
        expr1 = create_expression(1)
        expr2 = create_expression(1)
        self.assertEqual(expr1, expr2)

    def test_join_equality_with_then(self):
        # The following all result in the same graphs
        expr1 = create_expression(1)
        expr2 = create_expression(2)

        combined1 = expression.join(expr1, expr2)
        combined2 = expr1.then(expr2)
        self.assertEqual(combined1, combined2)

    def test_equality_different_type(self):
        expr = create_expression(1)
        self.assertNotEqual(expr, "1")
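

# Illustrative sketch (not part of the original suite): because ``join`` and
# ``then`` build the same graphs (see ``test_join_equality_with_then``),
# either form can prefix a common path onto a branch; the expressions
# themselves compare equal, not just their graphs.
def _demo_join_then_sketch():
    prefix = create_expression("root")
    leaf = create_expression("leaf")
    assert expression.join(prefix, leaf) == prefix.then(leaf)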
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "MockAdapter([mock_response]) authed_session = google.auth.transport.requests.AuthorizedSession( mock_credentials) authed_session.mount(self.TEST_URL, mock_adapter) response = authed_session.request('GET', self.TEST_URL) assert response", "google.auth.transport.requests.AuthorizedSession( mock_credentials) authed_session.mount(self.TEST_URL, mock_adapter) response = authed_session.request('GET', self.TEST_URL) assert response == mock_final_response assert", "KIND, either express or implied. # See the License for the specific language", "google.auth.transport.requests from tests.transport import compliance class TestRequestResponse(compliance.RequestResponseTests): def make_request(self): return google.auth.transport.requests.Request() def test_timeout(self):", "Unless required by applicable law or agreed to in writing, software # distributed", "MockAdapter(requests.adapters.BaseAdapter): def __init__(self, responses, headers=None): self.responses = responses self.requests = [] self.headers =", "def test_constructor(self): authed_session = google.auth.transport.requests.AuthorizedSession( mock.sentinel.credentials) assert authed_session.credentials == mock.sentinel.credentials def test_request_no_refresh(self): mock_credentials", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "self.token def before_request(self, request, method, url, headers): self.apply(headers) def refresh(self, request): self.token +=", "self.responses = responses self.requests = [] self.headers = headers or {} def send(self,", "make_request(self): return google.auth.transport.requests.Request() def test_timeout(self): http = mock.Mock() request = google.auth.transport.requests.Request(http) request(url='http://example.com', method='GET',", "headers['authorization'] = self.token def before_request(self, request, method, url, headers): self.apply(headers) def refresh(self, request):", "License. # You may obtain a copy of the License at # #", "google.auth.transport.requests.Request() def test_timeout(self): http = mock.Mock() request = google.auth.transport.requests.Request(http) request(url='http://example.com', method='GET', timeout=5) assert", "__init__(self, responses, headers=None): self.responses = responses self.requests = [] self.headers = headers or", "authed_session.mount(self.TEST_URL, mock_adapter) response = authed_session.request('GET', self.TEST_URL) assert response == mock_final_response assert mock_credentials.before_request.call_count ==", "language governing permissions and # limitations under the License. 
import mock import requests", "test_constructor(self): authed_session = google.auth.transport.requests.AuthorizedSession( mock.sentinel.credentials) assert authed_session.credentials == mock.sentinel.credentials def test_request_no_refresh(self): mock_credentials =", "= token def apply(self, headers): headers['authorization'] = self.token def before_request(self, request, method, url,", "send(self, request, **kwargs): self.requests.append(request) return self.responses.pop(0) def make_response(status=http_client.OK, data=None): response = requests.Response() response.status_code", "== mock_response assert mock_credentials.before_request.called assert not mock_credentials.refresh.called assert len(mock_adapter.requests) == 1 assert mock_adapter.requests[0].url", "responses self.requests = [] self.headers = headers or {} def send(self, request, **kwargs):", "assert mock_adapter.requests[0].url == self.TEST_URL assert mock_adapter.requests[0].headers['authorization'] == 'token' assert mock_adapter.requests[1].url == self.TEST_URL assert", "law or agreed to in writing, software # distributed under the License is", "import compliance class TestRequestResponse(compliance.RequestResponseTests): def make_request(self): return google.auth.transport.requests.Request() def test_timeout(self): http = mock.Mock()", "the License for the specific language governing permissions and # limitations under the", "requests.adapters from six.moves import http_client import google.auth.transport.requests from tests.transport import compliance class TestRequestResponse(compliance.RequestResponseTests):", "mock_credentials.before_request.called assert not mock_credentials.refresh.called assert len(mock_adapter.requests) == 1 assert mock_adapter.requests[0].url == self.TEST_URL assert", "import requests.adapters from six.moves import http_client import google.auth.transport.requests from tests.transport import compliance class", "MockCredentials(object): def __init__(self, token='token'): self.token = token def apply(self, headers): headers['authorization'] = self.token", "compliance with the License. # You may obtain a copy of the License", "response class TestAuthorizedHttp(object): TEST_URL = 'http://example.com/' def test_constructor(self): authed_session = google.auth.transport.requests.AuthorizedSession( mock.sentinel.credentials) assert", "def test_request_refresh(self): mock_credentials = mock.Mock(wraps=MockCredentials()) mock_final_response = make_response(status=http_client.OK) # First request will 401,", "= google.auth.transport.requests.AuthorizedSession( mock_credentials) authed_session.mount(self.TEST_URL, mock_adapter) response = authed_session.request('GET', self.TEST_URL) assert response == mock_response", "mock.Mock(wraps=MockCredentials()) mock_final_response = make_response(status=http_client.OK) # First request will 401, second request will succeed.", "limitations under the License. import mock import requests import requests.adapters from six.moves import", "under the License. 
import mock import requests import requests.adapters from six.moves import http_client", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "google.auth.transport.requests.Request(http) request(url='http://example.com', method='GET', timeout=5) assert http.request.call_args[1]['timeout'] == 5 class MockCredentials(object): def __init__(self, token='token'):", "def refresh(self, request): self.token += '1' class MockAdapter(requests.adapters.BaseAdapter): def __init__(self, responses, headers=None): self.responses", "this file except in compliance with the License. # You may obtain a", "data=None): response = requests.Response() response.status_code = status response._content = data return response class", "TestAuthorizedHttp(object): TEST_URL = 'http://example.com/' def test_constructor(self): authed_session = google.auth.transport.requests.AuthorizedSession( mock.sentinel.credentials) assert authed_session.credentials ==", "= make_response(status=http_client.OK) # First request will 401, second request will succeed. mock_adapter =", "= self.token def before_request(self, request, method, url, headers): self.apply(headers) def refresh(self, request): self.token", "2 assert mock_credentials.refresh.called assert len(mock_adapter.requests) == 2 assert mock_adapter.requests[0].url == self.TEST_URL assert mock_adapter.requests[0].headers['authorization']", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "url, headers): self.apply(headers) def refresh(self, request): self.token += '1' class MockAdapter(requests.adapters.BaseAdapter): def __init__(self,", "assert authed_session.credentials == mock.sentinel.credentials def test_request_no_refresh(self): mock_credentials = mock.Mock(wraps=MockCredentials()) mock_response = make_response() mock_adapter", "request will 401, second request will succeed. mock_adapter = MockAdapter([ make_response(status=http_client.UNAUTHORIZED), mock_final_response]) authed_session", "you may not use this file except in compliance with the License. #", "for the specific language governing permissions and # limitations under the License. 


class MockCredentials(object):
    def __init__(self, token='token'):
        self.token = token

    def apply(self, headers):
        headers['authorization'] = self.token

    def before_request(self, request, method, url, headers):
        self.apply(headers)

    def refresh(self, request):
        self.token += '1'


class MockAdapter(requests.adapters.BaseAdapter):
    def __init__(self, responses, headers=None):
        self.responses = responses
        self.requests = []
        self.headers = headers or {}

    def send(self, request, **kwargs):
        self.requests.append(request)
        return self.responses.pop(0)


def make_response(status=http_client.OK, data=None):
    response = requests.Response()
    response.status_code = status
    response._content = data
    return response
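

# Illustrative sketch (not part of the original suite): MockAdapter can be
# mounted on a plain requests.Session too; it replays the canned responses
# in order and records every outgoing PreparedRequest for later assertions.
def _demo_mock_adapter_sketch():
    adapter = MockAdapter([make_response(data=b'hello')])
    session = requests.Session()
    session.mount('http://example.com', adapter)

    response = session.get('http://example.com/resource')

    assert response.content == b'hello'
    assert adapter.requests[0].url == 'http://example.com/resource'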


class TestAuthorizedHttp(object):
    TEST_URL = 'http://example.com/'

    def test_constructor(self):
        authed_session = google.auth.transport.requests.AuthorizedSession(
            mock.sentinel.credentials)

        assert authed_session.credentials == mock.sentinel.credentials

    def test_request_no_refresh(self):
        mock_credentials = mock.Mock(wraps=MockCredentials())
        mock_response = make_response()
        mock_adapter = MockAdapter([mock_response])

        authed_session = google.auth.transport.requests.AuthorizedSession(
            mock_credentials)
        authed_session.mount(self.TEST_URL, mock_adapter)

        response = authed_session.request('GET', self.TEST_URL)

        assert response == mock_response
        assert mock_credentials.before_request.called
        assert not mock_credentials.refresh.called
        assert len(mock_adapter.requests) == 1
        assert mock_adapter.requests[0].url == self.TEST_URL
        assert mock_adapter.requests[0].headers['authorization'] == 'token'

    def test_request_refresh(self):
        mock_credentials = mock.Mock(wraps=MockCredentials())
        mock_final_response = make_response(status=http_client.OK)
        # First request will 401, second request will succeed.
        mock_adapter = MockAdapter([
            make_response(status=http_client.UNAUTHORIZED),
            mock_final_response])

        authed_session = google.auth.transport.requests.AuthorizedSession(
            mock_credentials)
        authed_session.mount(self.TEST_URL, mock_adapter)

        response = authed_session.request('GET', self.TEST_URL)

        assert response == mock_final_response
        assert mock_credentials.before_request.call_count == 2
        assert mock_credentials.refresh.called
        assert len(mock_adapter.requests) == 2
        assert mock_adapter.requests[0].url == self.TEST_URL
        assert mock_adapter.requests[0].headers['authorization'] == 'token'
        assert mock_adapter.requests[1].url == self.TEST_URL
        assert mock_adapter.requests[1].headers['authorization'] == 'token1'
import mock import requests import requests.adapters from", "self.TEST_URL assert mock_adapter.requests[0].headers['authorization'] == 'token' assert mock_adapter.requests[1].url == self.TEST_URL assert mock_adapter.requests[1].headers['authorization'] == 'token1'", "mock_credentials.refresh.called assert len(mock_adapter.requests) == 1 assert mock_adapter.requests[0].url == self.TEST_URL assert mock_adapter.requests[0].headers['authorization'] == 'token'", "= data return response class TestAuthorizedHttp(object): TEST_URL = 'http://example.com/' def test_constructor(self): authed_session =", "authed_session.credentials == mock.sentinel.credentials def test_request_no_refresh(self): mock_credentials = mock.Mock(wraps=MockCredentials()) mock_response = make_response() mock_adapter =", "# First request will 401, second request will succeed. mock_adapter = MockAdapter([ make_response(status=http_client.UNAUTHORIZED),", "the specific language governing permissions and # limitations under the License. import mock", "specific language governing permissions and # limitations under the License. import mock import", "authed_session.request('GET', self.TEST_URL) assert response == mock_final_response assert mock_credentials.before_request.call_count == 2 assert mock_credentials.refresh.called assert", "assert response == mock_response assert mock_credentials.before_request.called assert not mock_credentials.refresh.called assert len(mock_adapter.requests) == 1", "or agreed to in writing, software # distributed under the License is distributed", "will 401, second request will succeed. mock_adapter = MockAdapter([ make_response(status=http_client.UNAUTHORIZED), mock_final_response]) authed_session =", "= MockAdapter([ make_response(status=http_client.UNAUTHORIZED), mock_final_response]) authed_session = google.auth.transport.requests.AuthorizedSession( mock_credentials) authed_session.mount(self.TEST_URL, mock_adapter) response = authed_session.request('GET',", "or implied. # See the License for the specific language governing permissions and", "headers): self.apply(headers) def refresh(self, request): self.token += '1' class MockAdapter(requests.adapters.BaseAdapter): def __init__(self, responses,", "make_response(status=http_client.OK, data=None): response = requests.Response() response.status_code = status response._content = data return response", "method, url, headers): self.apply(headers) def refresh(self, request): self.token += '1' class MockAdapter(requests.adapters.BaseAdapter): def", "return google.auth.transport.requests.Request() def test_timeout(self): http = mock.Mock() request = google.auth.transport.requests.Request(http) request(url='http://example.com', method='GET', timeout=5)", "2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "headers or {} def send(self, request, **kwargs): self.requests.append(request) return self.responses.pop(0) def make_response(status=http_client.OK, data=None):", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "def make_response(status=http_client.OK, data=None): response = requests.Response() response.status_code = status response._content = data return", "response == mock_final_response assert mock_credentials.before_request.call_count == 2 assert mock_credentials.refresh.called assert len(mock_adapter.requests) == 2", "License. import mock import requests import requests.adapters from six.moves import http_client import google.auth.transport.requests", "= authed_session.request('GET', self.TEST_URL) assert response == mock_final_response assert mock_credentials.before_request.call_count == 2 assert mock_credentials.refresh.called", "= mock.Mock(wraps=MockCredentials()) mock_final_response = make_response(status=http_client.OK) # First request will 401, second request will", "http = mock.Mock() request = google.auth.transport.requests.Request(http) request(url='http://example.com', method='GET', timeout=5) assert http.request.call_args[1]['timeout'] == 5", "with the License. # You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "mock_response = make_response() mock_adapter = MockAdapter([mock_response]) authed_session = google.auth.transport.requests.AuthorizedSession( mock_credentials) authed_session.mount(self.TEST_URL, mock_adapter) response", "test_request_refresh(self): mock_credentials = mock.Mock(wraps=MockCredentials()) mock_final_response = make_response(status=http_client.OK) # First request will 401, second", "in writing, software # distributed under the License is distributed on an \"AS", "http_client import google.auth.transport.requests from tests.transport import compliance class TestRequestResponse(compliance.RequestResponseTests): def make_request(self): return google.auth.transport.requests.Request()", "TEST_URL = 'http://example.com/' def test_constructor(self): authed_session = google.auth.transport.requests.AuthorizedSession( mock.sentinel.credentials) assert authed_session.credentials == mock.sentinel.credentials", "authed_session = google.auth.transport.requests.AuthorizedSession( mock_credentials) authed_session.mount(self.TEST_URL, mock_adapter) response = authed_session.request('GET', self.TEST_URL) assert response ==", "mock.sentinel.credentials def test_request_no_refresh(self): mock_credentials = mock.Mock(wraps=MockCredentials()) mock_response = make_response() mock_adapter = MockAdapter([mock_response]) authed_session", "response = authed_session.request('GET', self.TEST_URL) assert response == mock_response assert mock_credentials.before_request.called assert not mock_credentials.refresh.called", "token def apply(self, headers): headers['authorization'] = self.token def before_request(self, request, method, url, headers):", "permissions and # limitations under the License. import mock import requests import requests.adapters", "mock import requests import requests.adapters from six.moves import http_client import google.auth.transport.requests from tests.transport", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "assert http.request.call_args[1]['timeout'] == 5 class MockCredentials(object): def __init__(self, token='token'): self.token = token def" ]
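
# Usage note (an addition, not part of the original test module): the retry
# contract pinned down by test_request_refresh is what callers rely on in
# application code. A minimal sketch, assuming Application Default Credentials
# are configured in the environment; fetch_with_adc is a hypothetical helper,
# and the project id returned by google.auth.default() is discarded here.
def fetch_with_adc(url='http://example.com/'):
    import google.auth  # local import: only this sketch needs it

    credentials, _ = google.auth.default()
    # AuthorizedSession adds the credentials to every request and, on an
    # HTTP 401, refreshes them once and retries, as the tests above assert.
    authed_session = google.auth.transport.requests.AuthorizedSession(credentials)
    return authed_session.request('GET', url)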
import csv

import matplotlib.pyplot as plt
# pip install matplotlib-venn
from matplotlib_venn import venn3

with open('../../Data/Annotations/ethnicity.csv') as f:
    reader = csv.DictReader(f)
    entries = list(reader)

images_per_language = {'dutch': set(), 'german': set(), 'english': set()}
for entry in entries:
    language = entry['language']
    flickr_id = entry['flickr_id']
    images_per_language[language].add(flickr_id)

diagram = venn3([images_per_language['dutch'],
                 images_per_language['german'],
                 images_per_language['english']],
                ['Dutch', 'German', 'English'])

for patch in diagram.patches:
    patch.set_facecolor('white')
    patch.set_linewidth(1)
    patch.set_edgecolor('black')
    patch.set_alpha(1.0)

for label in diagram.set_labels:
    label.set_size(20)

for label in diagram.subset_labels:
    label.set_size(20)

# Minor tweaks
label_12 = diagram.subset_labels[2]
x, y = label_12.get_position()
label_12.set_y(y + 0.03)
label_12.set_x(x + 0.02)

label_11 = diagram.subset_labels[4]
x, y = label_11.get_position()
# label_11.set_x(x - 0.025)
label_11.set_y(y - 0.07)

plt.savefig('../Output/ethnicity.pdf')
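
# Note (an addition, not in the original script): the numeric subset_labels
# indices above depend on matplotlib_venn's internal ordering of the seven
# regions. The library also supports lookup by region id, which is
# self-documenting; a sketch of an equivalent tweak for illustration,
# assuming id '110' denotes the Dutch-and-German-only region:
label = diagram.get_label_by_id('110')
if label is not None:  # empty regions have no label
    x, y = label.get_position()
    label.set_position((x + 0.02, y + 0.03))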
import os  # explicit import added; os.environ is read below

from .base import *
from .logging import *

DEBUG = True

SERVER_EMAIL = "Wikilink Local <<EMAIL>>"
DEFAULT_FROM_EMAIL = SERVER_EMAIL

# Django Debug Toolbar config
# ------------------------------------------------------------------------------
# Sometimes, developers do not want the debug toolbar on their local
# environments, so we can disable it by not passing a REQUIREMENTS_FILE
# variable when building the docker containers.
if os.environ["REQUIREMENTS_FILE"] == "local.txt":
    INSTALLED_APPS += [
        "debug_toolbar",
        "django_extensions",
    ]
    MIDDLEWARE += [
        "debug_toolbar.middleware.DebugToolbarMiddleware",
    ]
    INTERNAL_IPS = ["127.0.0.1", "localhost", "0.0.0.0"]

    def show_toolbar(request):
        return True

    DEBUG_TOOLBAR_CONFIG = {
        "SHOW_TOOLBAR_CALLBACK": show_toolbar,
    }
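
# Companion note (an addition, not in the original settings file): the debug
# toolbar also needs its URLs mounted before its panels can load. A minimal
# sketch of the matching entry in the project's root URLconf, assuming the
# same REQUIREMENTS_FILE guard is kept there; path and include come from
# django.urls:
#
#     if os.environ["REQUIREMENTS_FILE"] == "local.txt":
#         import debug_toolbar
#         urlpatterns += [path("__debug__/", include(debug_toolbar.urls))]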
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'loader2.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again.  Do not edit this file unless you know what you are doing.


from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(500, 426)
        MainWindow.setStyleSheet("")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        # Gradient background; labels stay transparent, buttons get a dark
        # rounded look.
        self.centralwidget.setStyleSheet(
            "QWidget{\n"
            "    background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1,"
            " stop:0 rgba(185, 38, 38, 255), stop:0.502778 rgba(118, 61, 93, 255),"
            " stop:0.827778 rgba(87, 46, 43, 255));\n"
            "}\n"
            "\n"
            ".QLabel{\n"
            "    background-color: transparent;\n"
            "}\n"
            "\n"
            ".QPushButton{\n"
            "    background-color: rgb(31, 0, 1);\n"
            "    border-radius: 1px;\n"
            "    color: rgb(255, 255, 255);\n"
            "    width : 60px;\n"
            "    height: 20px;\n"
            "    border-radius: 10px;\n"
            "    border: none;\n"
            "    padding: 2px;\n"
            "}")
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setEnabled(True)
        font = QtGui.QFont()
        font.setPointSize(36)
        self.label.setFont(font)
        self.label.setStyleSheet("")
        self.label.setObjectName("label")
        self.horizontalLayout_2.addWidget(self.label)
        spacerItem = QtWidgets.QSpacerItem(
            40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setObjectName("label_2")
        self.horizontalLayout_2.addWidget(self.label_2)
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setMouseTracking(False)
        self.lineEdit.setStyleSheet("color: rgb(255, 255, 255);\n"
                                    "background-color: transparent;")
        self.lineEdit.setText("")
        self.lineEdit.setObjectName("lineEdit")
        self.horizontalLayout_2.addWidget(self.lineEdit)
        spacerItem1 = QtWidgets.QSpacerItem(
            40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setEnabled(True)
        self.tableWidget.setStyleSheet("background-color: rgb(255, 255, 255);")
        self.tableWidget.setAutoScroll(False)
        self.tableWidget.setCornerButtonEnabled(False)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(10)
        self.tableWidget.setRowCount(0)
        # One empty header item per column; retranslateUi fills in the text.
        for column in range(10):
            self.tableWidget.setHorizontalHeaderItem(
                column, QtWidgets.QTableWidgetItem())
        self.tableWidget.horizontalHeader().setHighlightSections(False)
        self.tableWidget.verticalHeader().setVisible(True)
        self.tableWidget.verticalHeader().setHighlightSections(False)
        self.verticalLayout.addWidget(self.tableWidget)
        self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
        self.progressBar.setEnabled(True)
        self.progressBar.setStyleSheet(
            "QProgressBar{\n"
            "    background-color: rgb(98,114,164);\n"
            "    color: rgb(200, 200, 200);\n"
            "    border-style: none;\n"
            "    border-radius: 10px;\n"
            "    text-align: center;\n"
            "}\n"
            "\n"
            "QProgressBar::chunk{\n"
            "    background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0,"
            " stop:0.0170455 rgba(226, 0, 185, 255), stop:1 rgba(21, 25, 255, 255));\n"
            "    border-radius: 10px;\n"
            "}")
        self.progressBar.setProperty("value", 24)
        self.progressBar.setObjectName("progressBar")
        self.verticalLayout.addWidget(self.progressBar)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setObjectName("pushButton_3")
        self.horizontalLayout.addWidget(self.pushButton_3)
        spacerItem2 = QtWidgets.QSpacerItem(
            40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem2)
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setEnabled(True)
        self.pushButton_2.setObjectName("pushButton_2")
        self.horizontalLayout.addWidget(self.pushButton_2)
        self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_4.setObjectName("pushButton_4")
        self.horizontalLayout.addWidget(self.pushButton_4)
        self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_5.setObjectName("pushButton_5")
        self.horizontalLayout.addWidget(self.pushButton_5)
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setEnabled(True)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout.addWidget(self.pushButton)
        self.verticalLayout.addLayout(self.horizontalLayout)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        MainWindow.setTabOrder(self.tableWidget, self.lineEdit)
        MainWindow.setTabOrder(self.lineEdit, self.pushButton_3)
        MainWindow.setTabOrder(self.pushButton_3, self.pushButton_2)
        MainWindow.setTabOrder(self.pushButton_2, self.pushButton)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "Scrapelancer"))
        self.label_2.setText(_translate("MainWindow", "Search"))
        self.tableWidget.setSortingEnabled(False)
        headers = ["Username", "Tagline", "Country", "Ratings", "EarningLabel",
                   "Reviews", "Rate", "description", "skills", "imgSrc"]
        for column, text in enumerate(headers):
            item = self.tableWidget.horizontalHeaderItem(column)
            item.setText(_translate("MainWindow", text))
        self.pushButton_3.setText(_translate("MainWindow", "Sort"))
        self.pushButton_2.setText(_translate("MainWindow", "OK"))
        self.pushButton_4.setText(_translate("MainWindow", "Pause"))
        self.pushButton_5.setText(_translate("MainWindow", "Resume"))
        self.pushButton.setText(_translate("MainWindow", "Load Data"))
\" border-radius: 10px;\\n\" \" text-align: center;\\n\" \" \\n\" \"}\\n\"", "= QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(9, item) self.tableWidget.horizontalHeader().setHighlightSections(False) self.tableWidget.verticalHeader().setVisible(True) self.tableWidget.verticalHeader().setHighlightSections(False) self.verticalLayout.addWidget(self.tableWidget) self.progressBar = QtWidgets.QProgressBar(self.centralwidget) self.progressBar.setEnabled(True) self.progressBar.setStyleSheet(\"QProgressBar{\\n\"", "you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self,", "qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(185, 38, 38, 255), stop:0.502778 rgba(118, 61,", "will be lost when pyuic5 is # run again. Do not edit this", "item = self.tableWidget.horizontalHeaderItem(4) item.setText(_translate(\"MainWindow\", \"EarningLabel\")) item = self.tableWidget.horizontalHeaderItem(5) item.setText(_translate(\"MainWindow\", \"Reviews\")) item = self.tableWidget.horizontalHeaderItem(6)", "# WARNING: Any manual changes made to this file will be lost when", "self.lineEdit = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit.setMouseTracking(False) self.lineEdit.setStyleSheet(\"color: rgb(255, 255, 255);\\n\" \"background-color: transparent;\") self.lineEdit.setText(\"\") self.lineEdit.setObjectName(\"lineEdit\") self.horizontalLayout_2.addWidget(self.lineEdit)", "self.tableWidget.horizontalHeaderItem(1) item.setText(_translate(\"MainWindow\", \"Tagline\")) item = self.tableWidget.horizontalHeaderItem(2) item.setText(_translate(\"MainWindow\", \"Country\")) item = self.tableWidget.horizontalHeaderItem(3) item.setText(_translate(\"MainWindow\", \"Ratings\"))", "\" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(185, 38, 38, 255), stop:0.502778", "\" background-color: rgb(31, 0, 1);\\n\" \" border-radius: 1px;\\n\" \"\\n\" \" color: rgb(255, 255,", "self.tableWidget.horizontalHeaderItem(8) item.setText(_translate(\"MainWindow\", \"skills\")) item = self.tableWidget.horizontalHeaderItem(9) item.setText(_translate(\"MainWindow\", \"imgSrc\")) self.pushButton_3.setText(_translate(\"MainWindow\", \"Sort\")) self.pushButton_2.setText(_translate(\"MainWindow\", \"OK\")) self.pushButton_4.setText(_translate(\"MainWindow\",", "60px;\\n\" \" height: 20px;\\n\" \" border-radius: 10px;\\n\" \" border: none;\\n\" \" padding: 2px;\\n\"", "stop:0.0170455 rgba(226, 0, 185, 255), stop:1 rgba(21, 25, 255, 255));\\n\" \" border-radius: 10px;\\n\"", "self.tableWidget.horizontalHeaderItem(2) item.setText(_translate(\"MainWindow\", \"Country\")) item = self.tableWidget.horizontalHeaderItem(3) item.setText(_translate(\"MainWindow\", \"Ratings\")) item = self.tableWidget.horizontalHeaderItem(4) item.setText(_translate(\"MainWindow\", \"EarningLabel\"))", "made to this file will be lost when pyuic5 is # run again.", "generator 5.15.4 # # WARNING: Any manual changes made to this file will", "\"\\n\" \"}\\n\" \"\\n\" \".QPushButton{\\n\" \"\\n\" \" background-color: rgb(31, 0, 1);\\n\" \" border-radius: 1px;\\n\"", "Any manual changes made to this file will be lost when pyuic5 is", "QtWidgets.QLabel(self.centralwidget) self.label.setEnabled(True) font = QtGui.QFont() font.setPointSize(36) self.label.setFont(font) self.label.setStyleSheet(\"\") self.label.setObjectName(\"label\") self.horizontalLayout_2.addWidget(self.label) spacerItem = QtWidgets.QSpacerItem(40,", "QtWidgets.QWidget(MainWindow) 
self.centralwidget.setStyleSheet(\"QWidget{\\n\" \" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(185, 38, 38,", "self.tableWidget.setHorizontalHeaderItem(8, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(9, item) self.tableWidget.horizontalHeader().setHighlightSections(False) self.tableWidget.verticalHeader().setVisible(True) self.tableWidget.verticalHeader().setHighlightSections(False) self.verticalLayout.addWidget(self.tableWidget) self.progressBar =", "MainWindow.setTabOrder(self.tableWidget, self.lineEdit) MainWindow.setTabOrder(self.lineEdit, self.pushButton_3) MainWindow.setTabOrder(self.pushButton_3, self.pushButton_2) MainWindow.setTabOrder(self.pushButton_2, self.pushButton_4) MainWindow.setTabOrder(self.pushButton_4, self.pushButton_5) MainWindow.setTabOrder(self.pushButton_5, self.pushButton) def", "border-radius: 1px;\\n\" \"\\n\" \" color: rgb(255, 255, 255);\\n\" \" width : 60px;\\n\" \"", "MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.label.setText(_translate(\"MainWindow\", \"Scrapelancer\")) self.label_2.setText(_translate(\"MainWindow\", \"Search\")) self.tableWidget.setSortingEnabled(False) item =", "padding: 2px;\\n\" \" \\n\" \"}\\n\" \"\") self.centralwidget.setObjectName(\"centralwidget\") self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget) self.verticalLayout.setObjectName(\"verticalLayout\") self.horizontalLayout_2 =", "self.pushButton_3) MainWindow.setTabOrder(self.pushButton_3, self.pushButton_2) MainWindow.setTabOrder(self.pushButton_2, self.pushButton_4) MainWindow.setTabOrder(self.pushButton_4, self.pushButton_5) MainWindow.setTabOrder(self.pushButton_5, self.pushButton) def retranslateUi(self, MainWindow): _translate", "center;\\n\" \" \\n\" \"}\\n\" \"\\n\" \"QProgressBar::chunk{ \\n\" \" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1,", "self.tableWidget.setEnabled(True) self.tableWidget.setStyleSheet(\"background-color: rgb(255, 255, 255);\") self.tableWidget.setAutoScroll(False) self.tableWidget.setCornerButtonEnabled(False) self.tableWidget.setObjectName(\"tableWidget\") self.tableWidget.setColumnCount(10) self.tableWidget.setRowCount(0) item = QtWidgets.QTableWidgetItem()", "QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(500, 426) MainWindow.setStyleSheet(\"\") self.centralwidget", "file will be lost when pyuic5 is # run again. 
Do not edit", "\"}\\n\" \"\\n\" \"\\n\" \".QLabel{\\n\" \"\\n\" \" background-color: transparent;\\n\" \"\\n\" \"}\\n\" \"\\n\" \".QPushButton{\\n\" \"\\n\"", "coding: utf-8 -*- # Form implementation generated from reading ui file 'loader2.ui' #", "self.label.setObjectName(\"label\") self.horizontalLayout_2.addWidget(self.label) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem) self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setObjectName(\"label_2\")", "200, 200);\\n\" \" border-style: none;\\n\" \" border-radius: 10px;\\n\" \" text-align: center;\\n\" \" \\n\"", "QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem) self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setObjectName(\"label_2\") self.horizontalLayout_2.addWidget(self.label_2) self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)", "self.horizontalLayout.addWidget(self.pushButton) self.verticalLayout.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar", "= QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(4, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(5, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(6,", "QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem1) self.verticalLayout.addLayout(self.horizontalLayout_2) self.tableWidget = QtWidgets.QTableWidget(self.centralwidget) self.tableWidget.setEnabled(True) self.tableWidget.setStyleSheet(\"background-color: rgb(255, 255, 255);\") self.tableWidget.setAutoScroll(False)", "ui file 'loader2.ui' # # Created by: PyQt5 UI code generator 5.15.4 #", "not edit this file unless you know what you are doing. 
from PyQt5", "self.tableWidget.horizontalHeader().setHighlightSections(False) self.tableWidget.verticalHeader().setVisible(True) self.tableWidget.verticalHeader().setHighlightSections(False) self.verticalLayout.addWidget(self.tableWidget) self.progressBar = QtWidgets.QProgressBar(self.centralwidget) self.progressBar.setEnabled(True) self.progressBar.setStyleSheet(\"QProgressBar{\\n\" \" background-color: rgb(98,114,164);\\n\" \"", "self.pushButton_5) MainWindow.setTabOrder(self.pushButton_5, self.pushButton) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.label.setText(_translate(\"MainWindow\", \"Scrapelancer\"))", "\"background-color: transparent;\") self.lineEdit.setText(\"\") self.lineEdit.setObjectName(\"lineEdit\") self.horizontalLayout_2.addWidget(self.lineEdit) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem1) self.verticalLayout.addLayout(self.horizontalLayout_2)", ": 60px;\\n\" \" height: 20px;\\n\" \" border-radius: 10px;\\n\" \" border: none;\\n\" \" padding:", "self.pushButton_2.setEnabled(True) self.pushButton_2.setObjectName(\"pushButton_2\") self.horizontalLayout.addWidget(self.pushButton_2) self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_4.setObjectName(\"pushButton_4\") self.horizontalLayout.addWidget(self.pushButton_4) self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_5.setObjectName(\"pushButton_5\") self.horizontalLayout.addWidget(self.pushButton_5)", "self.tableWidget.setHorizontalHeaderItem(6, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(7, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(8, item) item", "\" color: rgb(200, 200, 200);\\n\" \" border-style: none;\\n\" \" border-radius: 10px;\\n\" \" text-align:", "item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(2, item) item = QtWidgets.QTableWidgetItem()", "self.label.setEnabled(True) font = QtGui.QFont() font.setPointSize(36) self.label.setFont(font) self.label.setStyleSheet(\"\") self.label.setObjectName(\"label\") self.horizontalLayout_2.addWidget(self.label) spacerItem = QtWidgets.QSpacerItem(40, 20,", "item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(4, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(5, item) item =", "item = self.tableWidget.horizontalHeaderItem(3) item.setText(_translate(\"MainWindow\", \"Ratings\")) item = self.tableWidget.horizontalHeaderItem(4) item.setText(_translate(\"MainWindow\", \"EarningLabel\")) item = self.tableWidget.horizontalHeaderItem(5)", "PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(500, 426)", "# # Created by: PyQt5 UI code generator 5.15.4 # # WARNING: Any", "from reading ui file 'loader2.ui' # # Created by: PyQt5 UI code generator", "QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(2, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(3, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(4, item)", "retranslateUi(self, MainWindow): _translate = 
QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.label.setText(_translate(\"MainWindow\", \"Scrapelancer\")) self.label_2.setText(_translate(\"MainWindow\", \"Search\")) self.tableWidget.setSortingEnabled(False) item", "= QtWidgets.QLabel(self.centralwidget) self.label_2.setObjectName(\"label_2\") self.horizontalLayout_2.addWidget(self.label_2) self.lineEdit = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit.setMouseTracking(False) self.lineEdit.setStyleSheet(\"color: rgb(255, 255, 255);\\n\" \"background-color:", "= self.tableWidget.horizontalHeaderItem(7) item.setText(_translate(\"MainWindow\", \"description\")) item = self.tableWidget.horizontalHeaderItem(8) item.setText(_translate(\"MainWindow\", \"skills\")) item = self.tableWidget.horizontalHeaderItem(9) item.setText(_translate(\"MainWindow\",", "QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem) self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setObjectName(\"label_2\") self.horizontalLayout_2.addWidget(self.label_2) self.lineEdit = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit.setMouseTracking(False) self.lineEdit.setStyleSheet(\"color: rgb(255,", "= QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(8, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(9, item) self.tableWidget.horizontalHeader().setHighlightSections(False) self.tableWidget.verticalHeader().setVisible(True) self.tableWidget.verticalHeader().setHighlightSections(False) self.verticalLayout.addWidget(self.tableWidget)", "QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem2) self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_2.setEnabled(True) self.pushButton_2.setObjectName(\"pushButton_2\") self.horizontalLayout.addWidget(self.pushButton_2) self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_4.setObjectName(\"pushButton_4\") self.horizontalLayout.addWidget(self.pushButton_4)", "y1:0, x2:1, y2:0, stop:0.0170455 rgba(226, 0, 185, 255), stop:1 rgba(21, 25, 255, 255));\\n\"", "\" border-radius: 10px;\\n\" \" text-align: center;\\n\" \" \\n\" \"}\\n\" \"\\n\" \"QProgressBar::chunk{ \\n\" \"", "= QtWidgets.QPushButton(self.centralwidget) self.pushButton_4.setObjectName(\"pushButton_4\") self.horizontalLayout.addWidget(self.pushButton_4) self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_5.setObjectName(\"pushButton_5\") self.horizontalLayout.addWidget(self.pushButton_5) self.pushButton = QtWidgets.QPushButton(self.centralwidget) self.pushButton.setEnabled(True)", "38, 255), stop:0.502778 rgba(118, 61, 93, 255), stop:0.827778 rgba(87, 46, 43, 255));\\n\" \"}\\n\"", "self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_3.setObjectName(\"pushButton_3\") self.horizontalLayout.addWidget(self.pushButton_3) spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem2) self.pushButton_2", "\" \\n\" \"}\\n\" \"\") self.centralwidget.setObjectName(\"centralwidget\") self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget) self.verticalLayout.setObjectName(\"verticalLayout\") self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")", "MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.label.setText(_translate(\"MainWindow\", 
\"Scrapelancer\")) self.label_2.setText(_translate(\"MainWindow\", \"Search\")) self.tableWidget.setSortingEnabled(False) item = self.tableWidget.horizontalHeaderItem(0) item.setText(_translate(\"MainWindow\", \"Username\")) item", "10px;\\n\" \" text-align: center;\\n\" \" \\n\" \"}\\n\" \"\\n\" \"QProgressBar::chunk{ \\n\" \" background-color: qlineargradient(spread:pad,", "= QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(3, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(4, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(5,", "self.horizontalLayout.addWidget(self.pushButton_5) self.pushButton = QtWidgets.QPushButton(self.centralwidget) self.pushButton.setEnabled(True) self.pushButton.setObjectName(\"pushButton\") self.horizontalLayout.addWidget(self.pushButton) self.verticalLayout.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0,", "item = self.tableWidget.horizontalHeaderItem(5) item.setText(_translate(\"MainWindow\", \"Reviews\")) item = self.tableWidget.horizontalHeaderItem(6) item.setText(_translate(\"MainWindow\", \"Rate\")) item = self.tableWidget.horizontalHeaderItem(7)", "= self.tableWidget.horizontalHeaderItem(6) item.setText(_translate(\"MainWindow\", \"Rate\")) item = self.tableWidget.horizontalHeaderItem(7) item.setText(_translate(\"MainWindow\", \"description\")) item = self.tableWidget.horizontalHeaderItem(8) item.setText(_translate(\"MainWindow\",", "\"Reviews\")) item = self.tableWidget.horizontalHeaderItem(6) item.setText(_translate(\"MainWindow\", \"Rate\")) item = self.tableWidget.horizontalHeaderItem(7) item.setText(_translate(\"MainWindow\", \"description\")) item =", "= QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(2, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(3,", "\"Country\")) item = self.tableWidget.horizontalHeaderItem(3) item.setText(_translate(\"MainWindow\", \"Ratings\")) item = self.tableWidget.horizontalHeaderItem(4) item.setText(_translate(\"MainWindow\", \"EarningLabel\")) item =", "= QtWidgets.QLabel(self.centralwidget) self.label.setEnabled(True) font = QtGui.QFont() font.setPointSize(36) self.label.setFont(font) self.label.setStyleSheet(\"\") self.label.setObjectName(\"label\") self.horizontalLayout_2.addWidget(self.label) spacerItem =", "QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(6, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(7, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(8, item)", "self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setEnabled(True) font = QtGui.QFont() font.setPointSize(36) self.label.setFont(font)", "\"\\n\" \"QProgressBar::chunk{ \\n\" \" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0.0170455 rgba(226, 0,", "= self.tableWidget.horizontalHeaderItem(3) item.setText(_translate(\"MainWindow\", \"Ratings\")) item = self.tableWidget.horizontalHeaderItem(4) item.setText(_translate(\"MainWindow\", \"EarningLabel\")) item = self.tableWidget.horizontalHeaderItem(5) item.setText(_translate(\"MainWindow\",", "\"\\n\" \" color: 
rgb(255, 255, 255);\\n\" \" width : 60px;\\n\" \" height: 20px;\\n\"", "= QtWidgets.QPushButton(self.centralwidget) self.pushButton_5.setObjectName(\"pushButton_5\") self.horizontalLayout.addWidget(self.pushButton_5) self.pushButton = QtWidgets.QPushButton(self.centralwidget) self.pushButton.setEnabled(True) self.pushButton.setObjectName(\"pushButton\") self.horizontalLayout.addWidget(self.pushButton) self.verticalLayout.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget) self.menubar", "self.lineEdit) MainWindow.setTabOrder(self.lineEdit, self.pushButton_3) MainWindow.setTabOrder(self.pushButton_3, self.pushButton_2) MainWindow.setTabOrder(self.pushButton_2, self.pushButton_4) MainWindow.setTabOrder(self.pushButton_4, self.pushButton_5) MainWindow.setTabOrder(self.pushButton_5, self.pushButton) def retranslateUi(self,", "item = self.tableWidget.horizontalHeaderItem(0) item.setText(_translate(\"MainWindow\", \"Username\")) item = self.tableWidget.horizontalHeaderItem(1) item.setText(_translate(\"MainWindow\", \"Tagline\")) item = self.tableWidget.horizontalHeaderItem(2)", "\"\\n\" \".QLabel{\\n\" \"\\n\" \" background-color: transparent;\\n\" \"\\n\" \"}\\n\" \"\\n\" \".QPushButton{\\n\" \"\\n\" \" background-color:", "item.setText(_translate(\"MainWindow\", \"EarningLabel\")) item = self.tableWidget.horizontalHeaderItem(5) item.setText(_translate(\"MainWindow\", \"Reviews\")) item = self.tableWidget.horizontalHeaderItem(6) item.setText(_translate(\"MainWindow\", \"Rate\")) item", "500, 21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) MainWindow.setTabOrder(self.tableWidget, self.lineEdit)", "Do not edit this file unless you know what you are doing. from", "what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def", "item.setText(_translate(\"MainWindow\", \"Reviews\")) item = self.tableWidget.horizontalHeaderItem(6) item.setText(_translate(\"MainWindow\", \"Rate\")) item = self.tableWidget.horizontalHeaderItem(7) item.setText(_translate(\"MainWindow\", \"description\")) item", "61, 93, 255), stop:0.827778 rgba(87, 46, 43, 255));\\n\" \"}\\n\" \"\\n\" \"\\n\" \".QLabel{\\n\" \"\\n\"", "know what you are doing. 
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object):", "2px;\\n\" \" \\n\" \"}\\n\" \"\") self.centralwidget.setObjectName(\"centralwidget\") self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget) self.verticalLayout.setObjectName(\"verticalLayout\") self.horizontalLayout_2 = QtWidgets.QHBoxLayout()", "self.tableWidget.horizontalHeaderItem(7) item.setText(_translate(\"MainWindow\", \"description\")) item = self.tableWidget.horizontalHeaderItem(8) item.setText(_translate(\"MainWindow\", \"skills\")) item = self.tableWidget.horizontalHeaderItem(9) item.setText(_translate(\"MainWindow\", \"imgSrc\"))", "= QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(0, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(2,", "self.label.setText(_translate(\"MainWindow\", \"Scrapelancer\")) self.label_2.setText(_translate(\"MainWindow\", \"Search\")) self.tableWidget.setSortingEnabled(False) item = self.tableWidget.horizontalHeaderItem(0) item.setText(_translate(\"MainWindow\", \"Username\")) item = self.tableWidget.horizontalHeaderItem(1)", "item = self.tableWidget.horizontalHeaderItem(9) item.setText(_translate(\"MainWindow\", \"imgSrc\")) self.pushButton_3.setText(_translate(\"MainWindow\", \"Sort\")) self.pushButton_2.setText(_translate(\"MainWindow\", \"OK\")) self.pushButton_4.setText(_translate(\"MainWindow\", \"Pause\")) self.pushButton_5.setText(_translate(\"MainWindow\", \"Resume\"))", "MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(500, 426) MainWindow.setStyleSheet(\"\") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setStyleSheet(\"QWidget{\\n\" \" background-color: qlineargradient(spread:pad, x1:0,", "self.label.setStyleSheet(\"\") self.label.setObjectName(\"label\") self.horizontalLayout_2.addWidget(self.label) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem) self.label_2 = QtWidgets.QLabel(self.centralwidget)", "self.pushButton_2) MainWindow.setTabOrder(self.pushButton_2, self.pushButton_4) MainWindow.setTabOrder(self.pushButton_4, self.pushButton_5) MainWindow.setTabOrder(self.pushButton_5, self.pushButton) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate", "self.verticalLayout.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar =", "rgb(255, 255, 255);\\n\" \"background-color: transparent;\") self.lineEdit.setText(\"\") self.lineEdit.setObjectName(\"lineEdit\") self.horizontalLayout_2.addWidget(self.lineEdit) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding,", "import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(500, 426) MainWindow.setStyleSheet(\"\")", "stop:0.827778 rgba(87, 46, 43, 255));\\n\" \"}\\n\" \"\\n\" \"\\n\" \".QLabel{\\n\" \"\\n\" \" background-color: transparent;\\n\"", "self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_4.setObjectName(\"pushButton_4\") self.horizontalLayout.addWidget(self.pushButton_4) self.pushButton_5 = 
QtWidgets.QPushButton(self.centralwidget) self.pushButton_5.setObjectName(\"pushButton_5\") self.horizontalLayout.addWidget(self.pushButton_5) self.pushButton = QtWidgets.QPushButton(self.centralwidget)", "rgb(98,114,164);\\n\" \" color: rgb(200, 200, 200);\\n\" \" border-style: none;\\n\" \" border-radius: 10px;\\n\" \"", "20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem2) self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_2.setEnabled(True) self.pushButton_2.setObjectName(\"pushButton_2\") self.horizontalLayout.addWidget(self.pushButton_2) self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)", "from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(500,", "\" \\n\" \"}\") self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(\"progressBar\") self.verticalLayout.addWidget(self.progressBar) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") self.pushButton_3 =", "MainWindow.setTabOrder(self.pushButton_3, self.pushButton_2) MainWindow.setTabOrder(self.pushButton_2, self.pushButton_4) MainWindow.setTabOrder(self.pushButton_4, self.pushButton_5) MainWindow.setTabOrder(self.pushButton_5, self.pushButton) def retranslateUi(self, MainWindow): _translate =", "= QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem) self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setObjectName(\"label_2\") self.horizontalLayout_2.addWidget(self.label_2) self.lineEdit =", "= QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar)", "= QtWidgets.QPushButton(self.centralwidget) self.pushButton_2.setEnabled(True) self.pushButton_2.setObjectName(\"pushButton_2\") self.horizontalLayout.addWidget(self.pushButton_2) self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_4.setObjectName(\"pushButton_4\") self.horizontalLayout.addWidget(self.pushButton_4) self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)", "20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem) self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setObjectName(\"label_2\") self.horizontalLayout_2.addWidget(self.label_2) self.lineEdit = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit.setMouseTracking(False)", "Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(500, 426) MainWindow.setStyleSheet(\"\") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setStyleSheet(\"QWidget{\\n\" \"", "item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(8, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(9, item) self.tableWidget.horizontalHeader().setHighlightSections(False) self.tableWidget.verticalHeader().setVisible(True)", "# Form implementation generated from reading ui file 'loader2.ui' # # Created by:", "generated from reading ui file 'loader2.ui' # # Created by: PyQt5 UI code", 
"self.lineEdit.setStyleSheet(\"color: rgb(255, 255, 255);\\n\" \"background-color: transparent;\") self.lineEdit.setText(\"\") self.lineEdit.setObjectName(\"lineEdit\") self.horizontalLayout_2.addWidget(self.lineEdit) spacerItem1 = QtWidgets.QSpacerItem(40, 20,", "\"Scrapelancer\")) self.label_2.setText(_translate(\"MainWindow\", \"Search\")) self.tableWidget.setSortingEnabled(False) item = self.tableWidget.horizontalHeaderItem(0) item.setText(_translate(\"MainWindow\", \"Username\")) item = self.tableWidget.horizontalHeaderItem(1) item.setText(_translate(\"MainWindow\",", "= QtWidgets.QLineEdit(self.centralwidget) self.lineEdit.setMouseTracking(False) self.lineEdit.setStyleSheet(\"color: rgb(255, 255, 255);\\n\" \"background-color: transparent;\") self.lineEdit.setText(\"\") self.lineEdit.setObjectName(\"lineEdit\") self.horizontalLayout_2.addWidget(self.lineEdit) spacerItem1", "self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) MainWindow.setTabOrder(self.tableWidget, self.lineEdit) MainWindow.setTabOrder(self.lineEdit, self.pushButton_3) MainWindow.setTabOrder(self.pushButton_3, self.pushButton_2) MainWindow.setTabOrder(self.pushButton_2, self.pushButton_4) MainWindow.setTabOrder(self.pushButton_4,", "setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(500, 426) MainWindow.setStyleSheet(\"\") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setStyleSheet(\"QWidget{\\n\" \" background-color: qlineargradient(spread:pad,", "\"QProgressBar::chunk{ \\n\" \" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0.0170455 rgba(226, 0, 185,", "stop:1 rgba(21, 25, 255, 255));\\n\" \" border-radius: 10px;\\n\" \" \\n\" \"}\") self.progressBar.setProperty(\"value\", 24)", "\" text-align: center;\\n\" \" \\n\" \"}\\n\" \"\\n\" \"QProgressBar::chunk{ \\n\" \" background-color: qlineargradient(spread:pad, x1:0,", "self.pushButton_4) MainWindow.setTabOrder(self.pushButton_4, self.pushButton_5) MainWindow.setTabOrder(self.pushButton_5, self.pushButton) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))", "self.tableWidget.setAutoScroll(False) self.tableWidget.setCornerButtonEnabled(False) self.tableWidget.setObjectName(\"tableWidget\") self.tableWidget.setColumnCount(10) self.tableWidget.setRowCount(0) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(0, item) item = QtWidgets.QTableWidgetItem()", "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0.0170455 rgba(226, 0, 185, 255), stop:1 rgba(21,", "WARNING: Any manual changes made to this file will be lost when pyuic5", "class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(500, 426) MainWindow.setStyleSheet(\"\") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setStyleSheet(\"QWidget{\\n\"", "x2:1, y2:0, stop:0.0170455 rgba(226, 0, 185, 255), stop:1 rgba(21, 25, 255, 255));\\n\" \"", "none;\\n\" \" border-radius: 10px;\\n\" \" text-align: center;\\n\" \" \\n\" \"}\\n\" \"\\n\" \"QProgressBar::chunk{ \\n\"", "= QtWidgets.QProgressBar(self.centralwidget) self.progressBar.setEnabled(True) self.progressBar.setStyleSheet(\"QProgressBar{\\n\" \" background-color: rgb(98,114,164);\\n\" \" color: rgb(200, 200, 200);\\n\" \"", "\" 
background-color: rgb(98,114,164);\\n\" \" color: rgb(200, 200, 200);\\n\" \" border-style: none;\\n\" \" border-radius:", "QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(500, 426) MainWindow.setStyleSheet(\"\") self.centralwidget =", "self.tableWidget.setObjectName(\"tableWidget\") self.tableWidget.setColumnCount(10) self.tableWidget.setRowCount(0) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(0, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1, item)", "-*- # Form implementation generated from reading ui file 'loader2.ui' # # Created", "transparent;\") self.lineEdit.setText(\"\") self.lineEdit.setObjectName(\"lineEdit\") self.horizontalLayout_2.addWidget(self.lineEdit) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem1) self.verticalLayout.addLayout(self.horizontalLayout_2) self.tableWidget", "= QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(7, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(8, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(9,", "20px;\\n\" \" border-radius: 10px;\\n\" \" border: none;\\n\" \" padding: 2px;\\n\" \" \\n\" \"}\\n\"", "185, 255), stop:1 rgba(21, 25, 255, 255));\\n\" \" border-radius: 10px;\\n\" \" \\n\" \"}\")", "QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(9, item) self.tableWidget.horizontalHeader().setHighlightSections(False) self.tableWidget.verticalHeader().setVisible(True) self.tableWidget.verticalHeader().setHighlightSections(False) self.verticalLayout.addWidget(self.tableWidget) self.progressBar = QtWidgets.QProgressBar(self.centralwidget) self.progressBar.setEnabled(True) self.progressBar.setStyleSheet(\"QProgressBar{\\n\" \"", "item.setText(_translate(\"MainWindow\", \"Country\")) item = self.tableWidget.horizontalHeaderItem(3) item.setText(_translate(\"MainWindow\", \"Ratings\")) item = self.tableWidget.horizontalHeaderItem(4) item.setText(_translate(\"MainWindow\", \"EarningLabel\")) item", "self.tableWidget.setHorizontalHeaderItem(5, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(6, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(7, item) item", "y2:1, stop:0 rgba(185, 38, 38, 255), stop:0.502778 rgba(118, 61, 93, 255), stop:0.827778 rgba(87,", "\"\\n\" \"\\n\" \".QLabel{\\n\" \"\\n\" \" background-color: transparent;\\n\" \"\\n\" \"}\\n\" \"\\n\" \".QPushButton{\\n\" \"\\n\" \"", "self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) MainWindow.setTabOrder(self.tableWidget, self.lineEdit) MainWindow.setTabOrder(self.lineEdit, self.pushButton_3) MainWindow.setTabOrder(self.pushButton_3, self.pushButton_2)", "\"Username\")) item = self.tableWidget.horizontalHeaderItem(1) item.setText(_translate(\"MainWindow\", \"Tagline\")) item = self.tableWidget.horizontalHeaderItem(2) item.setText(_translate(\"MainWindow\", \"Country\")) item =", "= QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem2) self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget) 
self.pushButton_2.setEnabled(True) self.pushButton_2.setObjectName(\"pushButton_2\") self.horizontalLayout.addWidget(self.pushButton_2) self.pushButton_4", "self.verticalLayout.addLayout(self.horizontalLayout_2) self.tableWidget = QtWidgets.QTableWidget(self.centralwidget) self.tableWidget.setEnabled(True) self.tableWidget.setStyleSheet(\"background-color: rgb(255, 255, 255);\") self.tableWidget.setAutoScroll(False) self.tableWidget.setCornerButtonEnabled(False) self.tableWidget.setObjectName(\"tableWidget\") self.tableWidget.setColumnCount(10)", "QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(2, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(3, item)", "self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow)", "\"Rate\")) item = self.tableWidget.horizontalHeaderItem(7) item.setText(_translate(\"MainWindow\", \"description\")) item = self.tableWidget.horizontalHeaderItem(8) item.setText(_translate(\"MainWindow\", \"skills\")) item =", "MainWindow.setTabOrder(self.pushButton_4, self.pushButton_5) MainWindow.setTabOrder(self.pushButton_5, self.pushButton) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.label.setText(_translate(\"MainWindow\",", "\" border-style: none;\\n\" \" border-radius: 10px;\\n\" \" text-align: center;\\n\" \" \\n\" \"}\\n\" \"\\n\"", "width : 60px;\\n\" \" height: 20px;\\n\" \" border-radius: 10px;\\n\" \" border: none;\\n\" \"", "item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(3, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(4, item) item = QtWidgets.QTableWidgetItem()", "self.verticalLayout.setObjectName(\"verticalLayout\") self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setEnabled(True) font = QtGui.QFont() font.setPointSize(36)", "\\n\" \"}\\n\" \"\") self.centralwidget.setObjectName(\"centralwidget\") self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget) self.verticalLayout.setObjectName(\"verticalLayout\") self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\") self.label", "file unless you know what you are doing. 
from PyQt5 import QtCore, QtGui,", "'loader2.ui' # # Created by: PyQt5 UI code generator 5.15.4 # # WARNING:", "self.tableWidget.setStyleSheet(\"background-color: rgb(255, 255, 255);\") self.tableWidget.setAutoScroll(False) self.tableWidget.setCornerButtonEnabled(False) self.tableWidget.setObjectName(\"tableWidget\") self.tableWidget.setColumnCount(10) self.tableWidget.setRowCount(0) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(0,", "item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(7, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(8, item) item =", "\\n\" \"}\\n\" \"\\n\" \"QProgressBar::chunk{ \\n\" \" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0.0170455", "\"\\n\" \".QPushButton{\\n\" \"\\n\" \" background-color: rgb(31, 0, 1);\\n\" \" border-radius: 1px;\\n\" \"\\n\" \"", "\"}\") self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(\"progressBar\") self.verticalLayout.addWidget(self.progressBar) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName(\"horizontalLayout\") self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_3.setObjectName(\"pushButton_3\")", "\"description\")) item = self.tableWidget.horizontalHeaderItem(8) item.setText(_translate(\"MainWindow\", \"skills\")) item = self.tableWidget.horizontalHeaderItem(9) item.setText(_translate(\"MainWindow\", \"imgSrc\")) self.pushButton_3.setText(_translate(\"MainWindow\", \"Sort\"))", "transparent;\\n\" \"\\n\" \"}\\n\" \"\\n\" \".QPushButton{\\n\" \"\\n\" \" background-color: rgb(31, 0, 1);\\n\" \" border-radius:", "item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(9, item) self.tableWidget.horizontalHeader().setHighlightSections(False) self.tableWidget.verticalHeader().setVisible(True) self.tableWidget.verticalHeader().setHighlightSections(False) self.verticalLayout.addWidget(self.tableWidget) self.progressBar = QtWidgets.QProgressBar(self.centralwidget) self.progressBar.setEnabled(True)", "QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem2) self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_2.setEnabled(True) self.pushButton_2.setObjectName(\"pushButton_2\") self.horizontalLayout.addWidget(self.pushButton_2) self.pushButton_4 =", "= QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem1) self.verticalLayout.addLayout(self.horizontalLayout_2) self.tableWidget = QtWidgets.QTableWidget(self.centralwidget) self.tableWidget.setEnabled(True) self.tableWidget.setStyleSheet(\"background-color: rgb(255,", "item.setText(_translate(\"MainWindow\", \"Rate\")) item = self.tableWidget.horizontalHeaderItem(7) item.setText(_translate(\"MainWindow\", \"description\")) item = self.tableWidget.horizontalHeaderItem(8) item.setText(_translate(\"MainWindow\", \"skills\")) item", "21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) MainWindow.setTabOrder(self.tableWidget, self.lineEdit) MainWindow.setTabOrder(self.lineEdit,", "255));\\n\" \"}\\n\" \"\\n\" \"\\n\" \".QLabel{\\n\" \"\\n\" 
\" background-color: transparent;\\n\" \"\\n\" \"}\\n\" \"\\n\" \".QPushButton{\\n\"", "item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(8, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(9, item) self.tableWidget.horizontalHeader().setHighlightSections(False) self.tableWidget.verticalHeader().setVisible(True) self.tableWidget.verticalHeader().setHighlightSections(False)", "font = QtGui.QFont() font.setPointSize(36) self.label.setFont(font) self.label.setStyleSheet(\"\") self.label.setObjectName(\"label\") self.horizontalLayout_2.addWidget(self.label) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding,", "stop:0 rgba(185, 38, 38, 255), stop:0.502778 rgba(118, 61, 93, 255), stop:0.827778 rgba(87, 46,", "be lost when pyuic5 is # run again. Do not edit this file", "color: rgb(200, 200, 200);\\n\" \" border-style: none;\\n\" \" border-radius: 10px;\\n\" \" text-align: center;\\n\"", "background-color: rgb(98,114,164);\\n\" \" color: rgb(200, 200, 200);\\n\" \" border-style: none;\\n\" \" border-radius: 10px;\\n\"", "\"}\\n\" \"\") self.centralwidget.setObjectName(\"centralwidget\") self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget) self.verticalLayout.setObjectName(\"verticalLayout\") self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\") self.label =", "again. Do not edit this file unless you know what you are doing.", "255), stop:0.502778 rgba(118, 61, 93, 255), stop:0.827778 rgba(87, 46, 43, 255));\\n\" \"}\\n\" \"\\n\"", "item.setText(_translate(\"MainWindow\", \"description\")) item = self.tableWidget.horizontalHeaderItem(8) item.setText(_translate(\"MainWindow\", \"skills\")) item = self.tableWidget.horizontalHeaderItem(9) item.setText(_translate(\"MainWindow\", \"imgSrc\")) self.pushButton_3.setText(_translate(\"MainWindow\",", "= QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(5, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(6, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(7,", "border-radius: 10px;\\n\" \" text-align: center;\\n\" \" \\n\" \"}\\n\" \"\\n\" \"QProgressBar::chunk{ \\n\" \" background-color:", "# run again. 
Do not edit this file unless you know what you", "self.pushButton_4.setObjectName(\"pushButton_4\") self.horizontalLayout.addWidget(self.pushButton_4) self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_5.setObjectName(\"pushButton_5\") self.horizontalLayout.addWidget(self.pushButton_5) self.pushButton = QtWidgets.QPushButton(self.centralwidget) self.pushButton.setEnabled(True) self.pushButton.setObjectName(\"pushButton\") self.horizontalLayout.addWidget(self.pushButton)", "item = self.tableWidget.horizontalHeaderItem(2) item.setText(_translate(\"MainWindow\", \"Country\")) item = self.tableWidget.horizontalHeaderItem(3) item.setText(_translate(\"MainWindow\", \"Ratings\")) item = self.tableWidget.horizontalHeaderItem(4)", "self.label_2.setText(_translate(\"MainWindow\", \"Search\")) self.tableWidget.setSortingEnabled(False) item = self.tableWidget.horizontalHeaderItem(0) item.setText(_translate(\"MainWindow\", \"Username\")) item = self.tableWidget.horizontalHeaderItem(1) item.setText(_translate(\"MainWindow\", \"Tagline\"))", "20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem1) self.verticalLayout.addLayout(self.horizontalLayout_2) self.tableWidget = QtWidgets.QTableWidget(self.centralwidget) self.tableWidget.setEnabled(True) self.tableWidget.setStyleSheet(\"background-color: rgb(255, 255, 255);\")", "self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_5.setObjectName(\"pushButton_5\") self.horizontalLayout.addWidget(self.pushButton_5) self.pushButton = QtWidgets.QPushButton(self.centralwidget) self.pushButton.setEnabled(True) self.pushButton.setObjectName(\"pushButton\") self.horizontalLayout.addWidget(self.pushButton) self.verticalLayout.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget)", "self.tableWidget.setCornerButtonEnabled(False) self.tableWidget.setObjectName(\"tableWidget\") self.tableWidget.setColumnCount(10) self.tableWidget.setRowCount(0) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(0, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1,", "QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem) self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setObjectName(\"label_2\") self.horizontalLayout_2.addWidget(self.label_2) self.lineEdit = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit.setMouseTracking(False) self.lineEdit.setStyleSheet(\"color:", "QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(8, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(9, item) self.tableWidget.horizontalHeader().setHighlightSections(False) self.tableWidget.verticalHeader().setVisible(True) self.tableWidget.verticalHeader().setHighlightSections(False) self.verticalLayout.addWidget(self.tableWidget) self.progressBar", "item.setText(_translate(\"MainWindow\", \"Ratings\")) item = self.tableWidget.horizontalHeaderItem(4) item.setText(_translate(\"MainWindow\", \"EarningLabel\")) item = self.tableWidget.horizontalHeaderItem(5) item.setText(_translate(\"MainWindow\", \"Reviews\")) item", "self.tableWidget.setHorizontalHeaderItem(0, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(2, item) item", "when pyuic5 is # run again. 
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'loader2.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again.  Do not edit this file unless you know what you are doing.


from PyQt5 import QtCore, QtGui, QtWidgets


# Class name restored per the standard pyuic5 convention for a top-level
# object named "MainWindow"; the name itself was lost in the damaged source.
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(500, 426)
        MainWindow.setStyleSheet("")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setStyleSheet("QWidget{\n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(185, 38, 38, 255), stop:0.502778 rgba(118, 61, 93, 255), stop:0.827778 rgba(87, 46, 43, 255));\n"
"}\n"
"\n"
"\n"
".QLabel{\n"
"\n"
" background-color: transparent;\n"
"\n"
"}\n"
"\n"
".QPushButton{\n"
"\n"
" background-color: rgb(31, 0, 1);\n"
" border-radius: 1px;\n"
"\n"
" color: rgb(255, 255, 255);\n"
" width : 60px;\n"
" height: 20px;\n"
" border-radius: 10px;\n"
" border: none;\n"
" padding: 2px;\n"
" \n"
"}\n"
"")
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setEnabled(True)
        font = QtGui.QFont()
        font.setPointSize(36)
        self.label.setFont(font)
        self.label.setStyleSheet("")
        self.label.setObjectName("label")
        self.horizontalLayout_2.addWidget(self.label)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setObjectName("label_2")
        self.horizontalLayout_2.addWidget(self.label_2)
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setMouseTracking(False)
        self.lineEdit.setStyleSheet("color: rgb(255, 255, 255);\n"
"background-color: transparent;")
        self.lineEdit.setText("")
        self.lineEdit.setObjectName("lineEdit")
        self.horizontalLayout_2.addWidget(self.lineEdit)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setEnabled(True)
        self.tableWidget.setStyleSheet("background-color: rgb(255, 255, 255);")
        self.tableWidget.setAutoScroll(False)
        self.tableWidget.setCornerButtonEnabled(False)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(10)
        self.tableWidget.setRowCount(0)
        # The generated file repeats this block once per column (0-9);
        # condensed here to a behavior-identical loop.
        for column in range(10):
            item = QtWidgets.QTableWidgetItem()
            self.tableWidget.setHorizontalHeaderItem(column, item)
        self.tableWidget.verticalHeader().setVisible(True)
        self.tableWidget.verticalHeader().setHighlightSections(False)
        self.verticalLayout.addWidget(self.tableWidget)
        self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
        self.progressBar.setEnabled(True)
        self.progressBar.setStyleSheet("QProgressBar{\n"
" background-color: rgb(98,114,164);\n"
" color: rgb(200, 200, 200);\n"
" border-style: none;\n"
" border-radius: 10px;\n"
" text-align: center;\n"
" \n"
"}\n"
"\n"
"QProgressBar::chunk{ \n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0.0170455 rgba(226, 0, 185, 255), stop:1 rgba(21, 25, 255, 255));\n"
" border-radius: 10px;\n"
" \n"
"}")
        self.progressBar.setProperty("value", 24)
        self.progressBar.setObjectName("progressBar")
        self.verticalLayout.addWidget(self.progressBar)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setObjectName("pushButton_3")
        self.horizontalLayout.addWidget(self.pushButton_3)
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem2)
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setEnabled(True)
        self.pushButton_2.setObjectName("pushButton_2")
        self.horizontalLayout.addWidget(self.pushButton_2)
        self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_4.setObjectName("pushButton_4")
        self.horizontalLayout.addWidget(self.pushButton_4)
        # Only the left-hand side of the pushButton_5 assignment survived in
        # the damaged source; the rest follows the usual pyuic5 pattern.
        self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_5.setObjectName("pushButton_5")
        self.horizontalLayout.addWidget(self.pushButton_5)
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setEnabled(True)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout.addWidget(self.pushButton)
        self.verticalLayout.addLayout(self.horizontalLayout)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        MainWindow.setTabOrder(self.tableWidget, self.lineEdit)
        MainWindow.setTabOrder(self.lineEdit, self.pushButton_3)
        MainWindow.setTabOrder(self.pushButton_3, self.pushButton_2)
        MainWindow.setTabOrder(self.pushButton_2, self.pushButton_4)
        MainWindow.setTabOrder(self.pushButton_4, self.pushButton_5)
        MainWindow.setTabOrder(self.pushButton_5, self.pushButton)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "Scrapelancer"))
        self.label_2.setText(_translate("MainWindow", "Search"))
        self.tableWidget.setSortingEnabled(False)
        # One setText call per column in the generated file; condensed to a
        # behavior-identical loop over the recovered header captions.
        headers = ["Username", "Tagline", "Country", "Ratings", "EarningLabel",
                   "Reviews", "Rate", "description", "skills", "imgSrc"]
        for column, text in enumerate(headers):
            self.tableWidget.horizontalHeaderItem(column).setText(_translate("MainWindow", text))
        self.pushButton_3.setText(_translate("MainWindow", "Sort"))
        self.pushButton_2.setText(_translate("MainWindow", "OK"))
        self.pushButton_4.setText(_translate("MainWindow", "Pause"))
        # The captions for pushButton_5 and pushButton were truncated in the
        # damaged source and are left unset rather than guessed at.
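
# A minimal launcher sketch for the class above -- standard PyQt5 boilerplate,
# not part of the pyuic5 output itself; added here only to show how the
# generated class is typically wired up.
if __name__ == "__main__":
    import sys

    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)     # builds all widgets onto the bare QMainWindow
    window.show()
    sys.exit(app.exec_())  # PyQt5 spelling; PyQt6 renamed this to exec()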
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

bl_info = {
    "name": "PoseTools",
    "author": "<NAME> <<EMAIL>>",
    "version": (1, 3),
    "blender": (2, 80, 0),
    "location": "Armature > Pose Library",
    "description": "Allows dynamic mixing between poses in library and clipboard",
    "warning": "",
    "wiki_url": "https://github.com/TheDuckCow/pose-tools",
    "category": "Animation"}

import bpy

v = False  # v for verbose
BV_IS_28 = None  # global initialization


def bv28():
    """Check if blender 2.8, for layouts, UI, and properties."""
    global BV_IS_28
    if not BV_IS_28:
        BV_IS_28 = hasattr(bpy.app, "version") and bpy.app.version >= (2, 80)
    return BV_IS_28


def poseAddLimited(ob, frame):
    # ob is the object/armature, should get the list of currently selected bones.
    # frame is the pre-determined frame where
    print("getting there eventually")


# brute force: copies all location/rotation/scale of all selected bones and returns a list
def getPose(poseCurr):
    pose = []
    b = bpy.context.selected_pose_bones
    for a in b:
        rotway = a.rotation_mode
        rotname = ''
        if rotway in ['QUATERNION']:
            rotname = "rotation_quaternion"  # for now, fix later
        elif rotway in ['XYZ', 'XZY', 'YXZ', 'YZX', 'ZYX', 'ZXY']:
            rotname = "rotation_euler"
        elif rotway in ['AXIS_ANGLE']:
            rotname = 'rotation_axis_angle'
        else:
            rotway = "rotation_quaternion"  # for now, fix later
        # rotation modes: rotation_axis_angle, rotation_euler, rotation_quaternion
        if rotname == 'rotation_axis_angle':
            # it's a list type, so can't/no need to .copy()
            pose.append([a.location.copy(), a.rotation_axis_angle, a.scale.copy(), rotname])
        else:
            pose.append([a.location.copy(), getattr(a, rotname).copy(), a.scale.copy(), rotname])
    return pose


# generic function for mixing two poses
def mixToPose(ob, pose, value):
    def linmix(orig, new, factor):
        return orig*(1-factor) + new*factor

    autoinsert = bpy.context.scene.tool_settings.use_keyframe_insert_auto
    bones_select = bpy.context.selected_pose_bones
    for b, p in zip(bones_select, pose):
        # moved from for loops to hard coded in attempt to increase speed,
        # this is the critical section!
        #for x in range(len(p[1])): #position
        #    b.location[x] = linmix(b.location[x], p[1][x], value)
        b.location[0] = linmix(b.location[0], p[0][0], value)
        b.location[1] = linmix(b.location[1], p[0][1], value)
        b.location[2] = linmix(b.location[2], p[0][2], value)
        b.scale[0] = linmix(b.scale[0], p[2][0], value)
        b.scale[1] = linmix(b.scale[1], p[2][1], value)
        b.scale[2] = linmix(b.scale[2], p[2][2], value)
        if p[3] == "rotation_quaternion" or p[3] == '':
            b.rotation_quaternion[0] = linmix(b.rotation_quaternion[0], p[1][0], value)
            b.rotation_quaternion[1] = linmix(b.rotation_quaternion[1], p[1][1], value)
            b.rotation_quaternion[2] = linmix(b.rotation_quaternion[2], p[1][2], value)
            b.rotation_quaternion[3] = linmix(b.rotation_quaternion[3], p[1][3], value)
        elif p[3] == "rotation_euler":
            b.rotation_euler[0] = linmix(b.rotation_euler[0], p[1][0], value)
            b.rotation_euler[1] = linmix(b.rotation_euler[1], p[1][1], value)
            b.rotation_euler[2] = linmix(b.rotation_euler[2], p[1][2], value)
        elif p[3] == "rotation_axis_angle":
            b.rotation_axis_angle[0] = linmix(b.rotation_axis_angle[0], p[1][0], value)
            b.rotation_axis_angle[1] = linmix(b.rotation_axis_angle[1], p[1][1], value)
            b.rotation_axis_angle[2] = linmix(b.rotation_axis_angle[2], p[1][2], value)
            b.rotation_axis_angle[3] = linmix(b.rotation_axis_angle[3], p[1][3], value)
        else:
            print("ERROR!")
        #for x in range(len(p[2])): #rotation_quaternion, not EULER
        #    b.rotation_quaternion[x] = linmix(b.rotation_quaternion[x], p[2][x], value)
    if autoinsert:
        bpy.ops.anim.keyframe_insert_menu(type='BUILTIN_KSI_VisualLocRotScale')
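
# The channel-wise blend above is plain linear interpolation,
# orig*(1-factor) + new*factor. A self-contained sketch of that math (no bpy
# needed; the sample quaternion values below are made up for illustration):
#
#     def linmix(orig, new, factor):
#         return orig*(1-factor) + new*factor
#
#     rest = [1.0, 0.0, 0.0, 0.0]        # identity quaternion (w, x, y, z)
#     target = [0.707, 0.0, 0.707, 0.0]  # roughly a 90-degree turn about Y
#     mixed = [linmix(o, n, 0.25) for o, n in zip(rest, target)]
#     # mixed == [0.92675, 0.0, 0.17675, 0.0]; note the result is not
#     # renormalized, matching the per-channel approach used here.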
#######
# The tool for mixing poses
class mixCurrentPose(bpy.types.Operator):
    """Mix-apply the selected library pose on to the current pose"""
    bl_idname = "poselib.mixcurrpose"
    bl_label = "Mix current pose"
    bl_options = {'REGISTER', 'UNDO'}

    influence = bpy.props.FloatProperty(
        name="Mix influence",
        default=100,
        subtype='PERCENTAGE',
        unit='NONE',
        min=0,
        max=100,
        description="influence"
        )
    pose_index = bpy.props.IntProperty(
        name="Pose Index",
        default=0,  # will be passed in
        min=0,
        description="pose index"
        )
    # make a property here for which pose, like the input one?
    # or even make it a dropdown? and have the numbers become the poseindex below for builtin

    def execute(self, context):
        ob = context.object
        prePose = getPose(ob.pose)  # each element is a list of vectors, [loc, rot (quat.), scale, rottype]
        bpy.ops.poselib.apply_pose(pose_index=self.pose_index)
        #bpy.ops.poselib.apply_pose(pose_index=context.object.pose_library.pose_markers.active_index)
        mixToPose(ob, prePose, 1-self.influence/100)  # mix the previous pose back in
        return {'FINISHED'}

    @classmethod
    def poll(cls, context):
        return (context.object and context.object.type == 'ARMATURE' and context.object.mode == 'POSE')
        # in the above, remove the last condition once it works in object mode too (apply to all bones..)


class mixedPosePaste(bpy.types.Operator):
    """Mix-paste the stored pose on to the current pose"""
    bl_idname = "poselib.mixedposepaste"
    bl_label = "Mix current pose with copied pose"
    bl_options = {'REGISTER', 'UNDO'}

    influence = bpy.props.FloatProperty(
        name="Mix influence",
        default=100,
        subtype='PERCENTAGE',
        unit='NONE',
        min=0,
        max=100,
        description="influence"
        )

    def execute(self, context):
        ob = context.object
        prePose = getPose(ob.pose)  # get a COPY of the current pose
        bpy.ops.pose.paste()
        mixToPose(ob, prePose, 1-self.influence/100)  # mix the previous pose back in
        return {'FINISHED'}

    @classmethod
    def poll(cls, context):
        return (context.object and context.object.type == 'ARMATURE' and context.object.mode == 'POSE')


def pose_tools_panel(self, context):
    """UI for the new tool, drawn next to the built-in pose library tools in the armature tab"""
    layout = self.layout
    col = layout.split(align=True)
    p = col.operator("poselib.mixcurrpose", text="Apply mixed pose")
    p.influence = context.scene.posemixinfluence
    if context.object.pose_library:
        p.pose_index = context.object.pose_library.pose_markers.active_index
    col.prop(context.scene, "posemixinfluence", slider=True, text="Mix Influence")


class poselibToolshelf(bpy.types.Panel):
    """Pose Tools operations"""
    bl_label = "Pose Library Tools"
    bl_space_type = 'VIEW_3D'
    bl_region_type = "UI" if bv28() else "TOOLS"
    # bl_context = "posemode"
    bl_category = "Tool" if bv28() else 'Tools'

    def draw(self, context):
        layout = self.layout
        row = layout.row()
        row.label(text="Pose Library")
        ob = context.object
        try:
            poselib = ob.pose_library
        except:
            row = layout.row()
            row.label(text="Select an armature for poses")
            return
        layout.template_ID(ob, "pose_library", new="poselib.new", unlink="poselib.unlink")
        if poselib:
            # list of poses in pose library
            row = layout.row()
            row.template_list("UI_UL_list", "pose_markers", poselib, "pose_markers",
                              poselib.pose_markers, "active_index", rows=3)
            col = row.column(align=True)
            col.active = (poselib.library is None)
            col.operator("poselib.pose_add",
                         icon="ZOOMIN" if bpy.app.version < (2, 80) else "ADD", text="")
            # frame = int, to bypass menu: add frame of the last un-used datablock!
            col.operator_context = 'EXEC_DEFAULT'  # exec not invoke, so menu doesn't need showing
            pose_marker_active = poselib.pose_markers.active
            col2 = layout.column(align=True)
            if pose_marker_active is not None:
                col.operator("poselib.pose_remove",
                             icon="ZOOMOUT" if bpy.app.version < (2, 80) else "REMOVE", text="")
        col2 = layout.column(align=True)
        if poselib:
            if pose_marker_active is not None:
                p = col2.operator("poselib.mixcurrpose", text="Apply mixed pose")
                p.influence = context.scene.posemixinfluence
                p.pose_index = context.object.pose_library.pose_markers.active_index
        row = col2.row(align=True)
        row.operator("pose.copy", text="Copy Pose")
        row.operator("poselib.mixedposepaste", text="Mixed Paste").influence = context.scene.posemixinfluence
        col2.prop(context.scene, "posemixinfluence", slider=True, text="Mix Influence")


def register():
    bpy.types.Scene.posemixinfluence = bpy.props.FloatProperty(
        name="Mix",
        description="The mix factor between the original pose and the new pose",
        subtype='PERCENTAGE',
        min=0,
        max=100,
        default=100)
    bpy.utils.register_class(mixCurrentPose)
    bpy.utils.register_class(poselibToolshelf)
    bpy.utils.register_class(mixedPosePaste)
    bpy.types.DATA_PT_pose_library.append(pose_tools_panel)


def unregister():
    bpy.types.DATA_PT_pose_library.remove(pose_tools_panel)
    bpy.utils.unregister_class(mixedPosePaste)
    bpy.utils.unregister_class(poselibToolshelf)
    bpy.utils.unregister_class(mixCurrentPose)
    del bpy.types.Scene.posemixinfluence


if __name__ == "__main__":
    register()
# #", "80) return BV_IS_28 def poseAddLimited(ob, frame): # ob is the object/armature, should get", "= 100, description=\"influence\" ) pose_index = bpy.props.IntProperty( name=\"Pose Index\", default= 0, # will", "v for verbose BV_IS_28 = None # global initialization def bv28(): \"\"\"Check if", "rotname]) else: pose.append([a.location.copy(), getattr(a,rotname).copy(), a.scale.copy(), rotname]) return pose # generic function for mixing", "moved from for loops to hard coded in attempt to increase speed, #", "and have the numbers become the poseindex below for builtin def execute(self, context):", "#get a COPY of the current pose bpy.ops.pose.paste() mixToPose(ob, prePose, 1-self.influence/100) # mix", "(context.object and context.object.type == 'ARMATURE' and context.object.mode == 'POSE' ) # in the", "returns list def getPose(poseCurr): pose = [] b = bpy.context.selected_pose_bones for a in", "mixing two poses def mixToPose(ob, pose, value): def linmix(orig, new, factor): return orig*(1-factor)+new*factor", "b.location[1] =linmix(b.location[1], p[0][1], value) b.location[2] =linmix(b.location[2], p[0][2], value) b.scale[0] = linmix(b.scale[0], p[2][0], value)", "p[1][1], value) b.rotation_quaternion[2] = linmix(b.rotation_quaternion[2], p[1][2], value) b.rotation_quaternion[3] = linmix(b.rotation_quaternion[3], p[1][3], value) elif", "context.object.pose_library: p.pose_index = context.object.pose_library.pose_markers.active_index col.prop(context.scene, \"posemixinfluence\", slider=True, text=\"Mix Influence\") class poselibToolshelf(bpy.types.Panel): \"\"\"Post Tools", "rotname = 'rotation_axis_angle' else: rotway = \"rotation_quaternion\" # for now, fix later #", "\"wiki_url\": \"https://github.com/TheDuckCow/pose-tools\", \"category\": \"Animation\"} import bpy v = False # v for verbose", "version 2 # of the License, or (at your option) any later version.", "b.rotation_euler[1] = linmix(b.rotation_euler[1], p[1][1], value) b.rotation_euler[2] = linmix(b.rotation_euler[2], p[1][2], value) elif p[3] ==", "prePose = getPose(ob.pose) # each element is a list of vectors, [loc, rot", "COPY of the current pose bpy.ops.pose.paste() mixToPose(ob, prePose, 1-self.influence/100) # mix back in", "General Public License for more details. # # You should have received a", "bpy v = False # v for verbose BV_IS_28 = None # global", "poll(cls, context): return (context.object and context.object.type == 'ARMATURE' and context.object.mode == 'POSE' )", "0), \"location\": \"Armature > Pose Library\", \"description\": \"Allows dynamic mixing between poses in", "the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See", "get it working in object mode too (apply to all bones..) class mixedPosePaste(bpy.types.Operator):", "# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
# generic function for mixing two poses
def mixToPose(ob, pose, value):
    def linmix(orig, new, factor):
        return orig*(1-factor) + new*factor
    autoinsert = bpy.context.scene.tool_settings.use_keyframe_insert_auto
    bones_select = bpy.context.selected_pose_bones
    for b, p in zip(bones_select, pose):
        # moved from for-loops to hard-coded indices in an attempt to increase
        # speed; this is the critical section!
        # for x in range(len(p[0])):  # position
        #     b.location[x] = linmix(b.location[x], p[0][x], value)
        b.location[0] = linmix(b.location[0], p[0][0], value)
        b.location[1] = linmix(b.location[1], p[0][1], value)
        b.location[2] = linmix(b.location[2], p[0][2], value)
        b.scale[0] = linmix(b.scale[0], p[2][0], value)
        b.scale[1] = linmix(b.scale[1], p[2][1], value)
        b.scale[2] = linmix(b.scale[2], p[2][2], value)
        if p[3] == "rotation_quaternion" or p[3] == '':
            b.rotation_quaternion[0] = linmix(b.rotation_quaternion[0], p[1][0], value)
            b.rotation_quaternion[1] = linmix(b.rotation_quaternion[1], p[1][1], value)
            b.rotation_quaternion[2] = linmix(b.rotation_quaternion[2], p[1][2], value)
            b.rotation_quaternion[3] = linmix(b.rotation_quaternion[3], p[1][3], value)
        elif p[3] == "rotation_euler":
            b.rotation_euler[0] = linmix(b.rotation_euler[0], p[1][0], value)
            b.rotation_euler[1] = linmix(b.rotation_euler[1], p[1][1], value)
            b.rotation_euler[2] = linmix(b.rotation_euler[2], p[1][2], value)
        elif p[3] == "rotation_axis_angle":
            b.rotation_axis_angle[0] = linmix(b.rotation_axis_angle[0], p[1][0], value)
            b.rotation_axis_angle[1] = linmix(b.rotation_axis_angle[1], p[1][1], value)
            b.rotation_axis_angle[2] = linmix(b.rotation_axis_angle[2], p[1][2], value)
            b.rotation_axis_angle[3] = linmix(b.rotation_axis_angle[3], p[1][3], value)
        else:
            print("ERROR!")
        # for x in range(len(p[1])):  # rotation (quaternion, not euler)
        #     b.rotation_quaternion[x] = linmix(b.rotation_quaternion[x], p[1][x], value)
    if autoinsert:
        bpy.ops.anim.keyframe_insert_menu(type='BUILTIN_KSI_VisualLocRotScale')
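# Minimal usage sketch (assumes an armature is selected in Pose Mode): blend
# the current pose halfway toward a previously captured snapshot.
#
#   snapshot = getPose(bpy.context.object.pose)   # capture the pose now
#   # ...pose the rig differently...
#   mixToPose(bpy.context.object, snapshot, 0.5)  # 50% toward the snapshot
#
# linmix() is a plain lerp: linmix(0.0, 2.0, 0.25) == 0.5.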
#######
# The tool for mixing poses
class mixCurrentPose(bpy.types.Operator):
    """Mix-apply the selected library pose on to the current pose"""
    bl_idname = "poselib.mixcurrpose"
    bl_label = "Mix current pose"
    bl_options = {'REGISTER', 'UNDO'}

    influence = bpy.props.FloatProperty(
        name="Mix influence",
        default=100,
        subtype='PERCENTAGE',
        unit='NONE',
        min=0,
        max=100,
        description="influence")
    pose_index = bpy.props.IntProperty(
        name="Pose Index",
        default=0,  # will be passed in
        min=0,
        description="pose index")
    # make a property here for which pose, like the input one?
    # or even make it a dropdown, and have the numbers become the pose_index
    # below for the builtin apply?

    def execute(self, context):
        # get a COPY of the current pose
        ob = context.object
        # each element is a list of vectors: [loc, rot (quat.), scale, rottype]
        prePose = getPose(ob.pose)
        bpy.ops.poselib.apply_pose(pose_index=self.pose_index)
        # bpy.ops.poselib.apply_pose(pose_index=context.object.pose_library.pose_markers.active_index)
        mixToPose(ob, prePose, 1 - self.influence/100)  # mix the previous pose back in
        return {'FINISHED'}

    @classmethod
    def poll(cls, context):
        return (context.object and context.object.type == 'ARMATURE'
                and context.object.mode == 'POSE')
        # in the above, remove the last condition once this works in object
        # mode too (apply to all bones..)
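# Scripted call sketch (operator must be registered, armature active in Pose
# Mode): apply library pose 0 at half strength.
#   bpy.ops.poselib.mixcurrpose(influence=50, pose_index=0)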
class mixedPosePaste(bpy.types.Operator):
    """Mix-paste the stored pose on to the current pose"""
    bl_idname = "poselib.mixedposepaste"
    bl_label = "Mix current pose with copied pose"
    bl_options = {'REGISTER', 'UNDO'}

    influence = bpy.props.FloatProperty(
        name="Mix influence",
        default=100,
        subtype='PERCENTAGE',
        unit='NONE',
        min=0,
        max=100,
        description="influence")

    def execute(self, context):
        ob = context.object
        prePose = getPose(ob.pose)  # get a COPY of the current pose
        bpy.ops.pose.paste()
        mixToPose(ob, prePose, 1 - self.influence/100)  # mix the previous pose back in
        return {'FINISHED'}

    @classmethod
    def poll(cls, context):
        return (context.object and context.object.type == 'ARMATURE'
                and context.object.mode == 'POSE')
def pose_tools_panel(self, context):
    """UI for new tool, drawn next to the built-in pose library tools in armature tab"""
    layout = self.layout
    col = layout.split(align=True)
    p = col.operator("poselib.mixcurrpose", text="Apply mixed pose")
    p.influence = context.scene.posemixinfluence
    if context.object.pose_library:
        p.pose_index = context.object.pose_library.pose_markers.active_index
    col.prop(context.scene, "posemixinfluence", slider=True, text="Mix Influence")


class poselibToolshelf(bpy.types.Panel):
    """Pose Tools operations"""
    bl_label = "Pose Library Tools"
    bl_space_type = 'VIEW_3D'
    bl_region_type = "UI" if bv28() else "TOOLS"
    # bl_context = "posemode"
    bl_category = "Tool" if bv28() else 'Tools'

    def draw(self, context):
        layout = self.layout
        row = layout.row()
        row.label(text="Pose Library")
        ob = context.object
        try:
            poselib = ob.pose_library
        except:
            row = layout.row()
            row.label(text="Select an armature for poses")
            return
        layout.template_ID(ob, "pose_library", new="poselib.new", unlink="poselib.unlink")
        if poselib:
            # list of poses in pose library
            row = layout.row()
            row.template_list("UI_UL_list", "pose_markers", poselib, "pose_markers",
                              poselib.pose_markers, "active_index", rows=3)
            col = row.column(align=True)
            col.active = (poselib.library is None)
            col.operator("poselib.pose_add",
                         icon="ZOOMIN" if bpy.app.version < (2, 80) else "ADD",
                         text="")
            # frame = int, to bypass the menu; add frame of the last un-used datablock!
            col.operator_context = 'EXEC_DEFAULT'  # exec not invoke, so menu doesn't need showing
            pose_marker_active = poselib.pose_markers.active
            if pose_marker_active is not None:
                col.operator("poselib.pose_remove",
                             icon="ZOOMOUT" if bpy.app.version < (2, 80) else "REMOVE",
                             text="")
        col2 = layout.column(align=True)
        if poselib:
            if pose_marker_active is not None:
                p = col2.operator("poselib.mixcurrpose", text="Apply mixed pose")
                p.influence = context.scene.posemixinfluence
                p.pose_index = context.object.pose_library.pose_markers.active_index
        row = col2.row(align=True)
        row.operator("pose.copy", text="Copy Pose")
        row.operator("poselib.mixedposepaste", text="Mixed Paste").influence = context.scene.posemixinfluence
        col2.prop(context.scene, "posemixinfluence", slider=True, text="Mix Influence")
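# Placement note: pose_tools_panel is a draw function appended to the built-in
# DATA_PT_pose_library panel in register() below, while poselibToolshelf is a
# standalone panel in the 3D View sidebar ("Tool" tab in 2.8x, the toolshelf
# in 2.7x).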
def register():
    bpy.types.Scene.posemixinfluence = bpy.props.FloatProperty(
        name="Mix",
        description="The mix factor between the original pose and the new pose",
        subtype='PERCENTAGE',
        min=0,
        max=100,
        default=100)
    bpy.utils.register_class(mixCurrentPose)
    bpy.utils.register_class(poselibToolshelf)
    bpy.utils.register_class(mixedPosePaste)
    bpy.types.DATA_PT_pose_library.append(pose_tools_panel)


def unregister():
    bpy.types.DATA_PT_pose_library.remove(pose_tools_panel)
    bpy.utils.unregister_class(mixedPosePaste)
    bpy.utils.unregister_class(poselibToolshelf)
    bpy.utils.unregister_class(mixCurrentPose)
    del bpy.types.Scene.posemixinfluence


if __name__ == "__main__":
    register()
and have the numbers become the poseindex below for builtin def", "if bv28() else \"TOOLS\" # bl_context = \"posemode\" bl_category = \"Tool\" if bv28()", "'ARMATURE' and context.object.mode == 'POSE' ) # in the above, remove the last", "if bv28() else 'Tools' def draw(self, context): layout = self.layout row = layout.row()", "'Tools' def draw(self, context): layout = self.layout row = layout.row() row.label(text=\"Pose Library\") ob", "= context.scene.posemixinfluence p.pose_index = context.object.pose_library.pose_markers.active_index row = col2.row(align=True) row.operator(\"pose.copy\", text=\"Copy Pose\") row.operator(\"poselib.mixedposepaste\", text=\"Mixed", "== 'POSE' ) def pose_tools_panel(self, context): \"\"\"UI for new tool, drawn next to", "(quat.), scale, rottype] bpy.ops.poselib.apply_pose(pose_index=self.pose_index) #bpy.ops.poselib.apply_pose(pose_index=context.object.pose_library.pose_markers.active_index) mixToPose(ob, prePose, 1-self.influence/100) # mix back in the", "brute force copies all location/rotation/scale of all bones and returns list def getPose(poseCurr):", "tab\"\"\" layout = self.layout col = layout.split(align=True) p = col.operator(\"poselib.mixcurrpose\",text=\"Apply mixed pose\") p.influence", "your option) any later version. # # This program is distributed in the", "back return {'FINISHED'} @classmethod def poll(cls, context): return (context.object and context.object.type == 'ARMATURE'", "zip(bones_select,pose): # moved from for loops to hard coded in attempt to increase", "value) b.rotation_quaternion[2] = linmix(b.rotation_quaternion[2], p[1][2], value) b.rotation_quaternion[3] = linmix(b.rotation_quaternion[3], p[1][3], value) elif p[3]", "if bpy.app.version < (2, 80) else \"REMOVE\", text=\"\") col2 = layout.column(align=True) if poselib:", "bl_idname = \"poselib.mixedposepaste\" bl_label = \"Mix current pose with copied pose\" bl_options =", "> Pose Library\", \"description\": \"Allows dynamic mixing between poses in library and clipboard\",", "not BV_IS_28: BV_IS_28 = hasattr(bpy.app, \"version\") and bpy.app.version >= (2, 80) return BV_IS_28", "== \"rotation_quaternion\" or p[3] == '': b.rotation_quaternion[0] = linmix(b.rotation_quaternion[0], p[1][0], value) b.rotation_quaternion[1] =", "and clipboard\", \"warning\": \"\", \"wiki_url\": \"https://github.com/TheDuckCow/pose-tools\", \"category\": \"Animation\"} import bpy v = False", "unit='NONE', min = 0, max = 100, description=\"influence\" ) pose_index = bpy.props.IntProperty( name=\"Pose", "a list type, so can't/no need to .copy() pose.append([a.location.copy(), a.rotation_axis_angle, a.scale.copy(), rotname]) else:", "rotname = \"rotation_quaternion\" # for now, fix later elif rotway in ['XYZ','XZY','YXZ','YZX','ZYX','ZXY']: rotname", "pose ob = context.object prePose = getPose(ob.pose) # each element is a list", "\"Mix current pose with copied pose\" bl_options = {'REGISTER', 'UNDO'} influence = bpy.props.FloatProperty(", "mix back in the previous pose return {'FINISHED'} @classmethod def poll(cls, context): return", "bpy.props.FloatProperty( name=\"Mix\", description=\"The mix factor between the original pose and the new pose\",", "poselib, \"pose_markers\", poselib.pose_markers, \"active_index\", rows=3) col = row.column(align=True) col.active = (poselib.library is None)", "def unregister(): bpy.types.DATA_PT_pose_library.remove(pose_tools_panel) bpy.utils.unregister_class(mixedPosePaste) bpy.utils.unregister_class(poselibToolshelf) bpy.utils.unregister_class(mixCurrentPose) del 
bpy.types.Scene.posemixinfluence if __name__ == \"__main__\": register()", "col = layout.split(align=True) p = col.operator(\"poselib.mixcurrpose\",text=\"Apply mixed pose\") p.influence = context.scene.posemixinfluence if context.object.pose_library:", "for mixing poses class mixCurrentPose(bpy.types.Operator): \"\"\"Mix-apply the selected library pose on to the", "= \"Mix current pose with copied pose\" bl_options = {'REGISTER', 'UNDO'} influence =", "un-used datablock! col.operator_context = 'EXEC_DEFAULT' # exec not invoke, so menu doesn't need", "#### BEGIN GPL LICENSE BLOCK ##### # # This program is free software;", "b.rotation_euler[2] = linmix(b.rotation_euler[2], p[1][2], value) elif p[3] == \"rotation_axis_angle\": b.rotation_axis_angle[0] = linmix(b.rotation_axis_angle[0], p[1][0],", "p[1][3], value) else: print(\"ERROR!\") #for x in range(len(p[2])): #rotation_quaternion, not EULER # b.rotation_quaternion[x]", "b.location[2] =linmix(b.location[2], p[0][2], value) b.scale[0] = linmix(b.scale[0], p[2][0], value) b.scale[1] = linmix(b.scale[1], p[2][1],", "\"REMOVE\", text=\"\") col2 = layout.column(align=True) if poselib: if pose_marker_active is not None: p", "ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR", "later version. # # This program is distributed in the hope that it", "# list of poses in pose library row = layout.row() row.template_list(\"UI_UL_list\", \"pose_markers\", poselib,", "you can redistribute it and/or # modify it under the terms of the", "last one once I get it working in object mode too (apply to", "p.pose_index = context.object.pose_library.pose_markers.active_index col.prop(context.scene, \"posemixinfluence\", slider=True, text=\"Mix Influence\") class poselibToolshelf(bpy.types.Panel): \"\"\"Post Tools operations\"\"\"", "ob is the object/armature, should get the list of currently selected bones. #", "not EULER # b.rotation_quaternion[x] = linmix(b.rotation_quaternion[x], p[2][x], value) if autoinsert: bpy.ops.anim.keyframe_insert_menu(type='BUILTIN_KSI_VisualLocRotScale') ####### #", "unit='NONE', min = 0, max = 100, description=\"influence\" ) def execute(self, context): ob", "Influence\") class poselibToolshelf(bpy.types.Panel): \"\"\"Post Tools operations\"\"\" bl_label = \"Pose Library Tools\" bl_space_type =", "so can't/no need to .copy() pose.append([a.location.copy(), a.rotation_axis_angle, a.scale.copy(), rotname]) else: pose.append([a.location.copy(), getattr(a,rotname).copy(), a.scale.copy(),", "None) col.operator(\"poselib.pose_add\", icon=\"ZOOMIN\" if bpy.app.version < (2, 80) else \"ADD\", text=\"\") # frame", "text=\"Copy Pose\") row.operator(\"poselib.mixedposepaste\", text=\"Mixed Paste\").influence = context.scene.posemixinfluence col2.prop(context.scene, \"posemixinfluence\", slider=True, text=\"Mix Influence\") def", "the built-in post library tools in armature tab\"\"\" layout = self.layout col =", "except: row = layout.row() row.label(text=\"Select an armature for poses\") return layout.template_ID(ob, \"pose_library\", new=\"poselib.new\",", "for which pose, like the input one? 
# or even make it a", "WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS", "= layout.column(align=True) if pose_marker_active is not None: col.operator(\"poselib.pose_remove\", icon=\"ZOOMOUT\" if bpy.app.version < (2,", "ob.pose_library except: row = layout.row() row.label(text=\"Select an armature for poses\") return layout.template_ID(ob, \"pose_library\",", "= col2.operator(\"poselib.mixcurrpose\",text=\"Apply mixed pose\") p.influence = context.scene.posemixinfluence p.pose_index = context.object.pose_library.pose_markers.active_index row = col2.row(align=True)", "# # You should have received a copy of the GNU General Public", "<reponame>TheDuckCow/pose-tools #### BEGIN GPL LICENSE BLOCK ##### # # This program is free", "value) b.location[2] =linmix(b.location[2], p[0][2], value) b.scale[0] = linmix(b.scale[0], p[2][0], value) b.scale[1] = linmix(b.scale[1],", "current pose\"\"\" bl_idname = \"poselib.mixcurrpose\" bl_label = \"Mix current pose\" bl_options = {'REGISTER',", "all location/rotation/scale of all bones and returns list def getPose(poseCurr): pose = []", "# in the above, remove the last one once I get it working", "force copies all location/rotation/scale of all bones and returns list def getPose(poseCurr): pose", "p[1][0], value) b.rotation_axis_angle[1] = linmix(b.rotation_axis_angle[1], p[1][1], value) b.rotation_axis_angle[2] = linmix(b.rotation_axis_angle[2], p[1][2], value) b.rotation_axis_angle[3]", "in range(len(p[2])): #rotation_quaternion, not EULER # b.rotation_quaternion[x] = linmix(b.rotation_quaternion[x], p[2][x], value) if autoinsert:", "to the current pose\"\"\" bl_idname = \"poselib.mixedposepaste\" bl_label = \"Mix current pose with", "FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more", "have received a copy of the GNU General Public License # along with", "<<EMAIL>>\", \"version\": (1, 3), \"blender\": (2, 80, 0), \"location\": \"Armature > Pose Library\",", "value) b.scale[0] = linmix(b.scale[0], p[2][0], value) b.scale[1] = linmix(b.scale[1], p[2][1], value) b.scale[2] =", "two poses def mixToPose(ob, pose, value): def linmix(orig, new, factor): return orig*(1-factor)+new*factor autoinsert", "col2.operator(\"poselib.mixcurrpose\",text=\"Apply mixed pose\") p.influence = context.scene.posemixinfluence p.pose_index = context.object.pose_library.pose_markers.active_index row = col2.row(align=True) row.operator(\"pose.copy\",", "b = bpy.context.selected_pose_bones for a in b: rotway = a.rotation_mode rotname = ''", "BV_IS_28 = hasattr(bpy.app, \"version\") and bpy.app.version >= (2, 80) return BV_IS_28 def poseAddLimited(ob,", "Public License for more details. # # You should have received a copy", "currently selected bones. # frame is the pre-determined frame where print(\"getting there eventually\")", "=linmix(b.location[2], p[0][2], value) b.scale[0] = linmix(b.scale[0], p[2][0], value) b.scale[1] = linmix(b.scale[1], p[2][1], value)", "= \"rotation_quaternion\" # for now, fix later # rotation modes: rotation_axis_angle, rotation_euler, rotation_quaternion", "Floor, Boston, MA 02110-1301, USA. 
# # ##### END GPL LICENSE BLOCK #####", "context.object prePose = getPose(ob.pose) #get a COPY of the current pose bpy.ops.pose.paste() mixToPose(ob,", "\"\", \"wiki_url\": \"https://github.com/TheDuckCow/pose-tools\", \"category\": \"Animation\"} import bpy v = False # v for", "p[2][x], value) if autoinsert: bpy.ops.anim.keyframe_insert_menu(type='BUILTIN_KSI_VisualLocRotScale') ####### # The tool for mixing poses class", "current pose ob = context.object prePose = getPose(ob.pose) # each element is a", "if bpy.app.version < (2, 80) else \"ADD\", text=\"\") # frame = int, to", "the new pose\", subtype='PERCENTAGE', min=0, max=100, default=100) bpy.utils.register_class(mixCurrentPose) bpy.utils.register_class(poselibToolshelf) bpy.utils.register_class(mixedPosePaste) bpy.types.DATA_PT_pose_library.append(pose_tools_panel) def unregister():", "\"warning\": \"\", \"wiki_url\": \"https://github.com/TheDuckCow/pose-tools\", \"category\": \"Animation\"} import bpy v = False # v", "class mixedPosePaste(bpy.types.Operator): \"\"\"Mix-paste the stored pose on to the current pose\"\"\" bl_idname =", "next to the built-in post library tools in armature tab\"\"\" layout = self.layout", "'rotation_axis_angle' else: rotway = \"rotation_quaternion\" # for now, fix later # rotation modes:", "bl_category = \"Tool\" if bv28() else 'Tools' def draw(self, context): layout = self.layout", "License, or (at your option) any later version. # # This program is", "I get it working in object mode too (apply to all bones..) class", "layout.template_ID(ob, \"pose_library\", new=\"poselib.new\", unlink=\"poselib.unlink\") if poselib: # list of poses in pose library", "and properties. \"\"\" global BV_IS_28 if not BV_IS_28: BV_IS_28 = hasattr(bpy.app, \"version\") and", "between poses in library and clipboard\", \"warning\": \"\", \"wiki_url\": \"https://github.com/TheDuckCow/pose-tools\", \"category\": \"Animation\"} import", "clipboard\", \"warning\": \"\", \"wiki_url\": \"https://github.com/TheDuckCow/pose-tools\", \"category\": \"Animation\"} import bpy v = False #", "Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.", "layouts, UI, and properties. 
\"\"\" global BV_IS_28 if not BV_IS_28: BV_IS_28 = hasattr(bpy.app,", "def poseAddLimited(ob, frame): # ob is the object/armature, should get the list of", "frame): # ob is the object/armature, should get the list of currently selected", "min = 0, description=\"pose index\" ) # make a property here for which", "col.operator(\"poselib.pose_remove\", icon=\"ZOOMOUT\" if bpy.app.version < (2, 80) else \"REMOVE\", text=\"\") col2 = layout.column(align=True)", "= \"poselib.mixedposepaste\" bl_label = \"Mix current pose with copied pose\" bl_options = {'REGISTER',", "context.object prePose = getPose(ob.pose) # each element is a list of vectors, [loc,", "# it's a list type, so can't/no need to .copy() pose.append([a.location.copy(), a.rotation_axis_angle, a.scale.copy(),", "armature tab\"\"\" layout = self.layout col = layout.split(align=True) p = col.operator(\"poselib.mixcurrpose\",text=\"Apply mixed pose\")", "\"blender\": (2, 80, 0), \"location\": \"Armature > Pose Library\", \"description\": \"Allows dynamic mixing", "Library Tools\" bl_space_type = 'VIEW_3D' bl_region_type = \"UI\" if bv28() else \"TOOLS\" #", "and context.object.type == 'ARMATURE' and context.object.mode == 'POSE' ) # in the above,", "autoinsert: bpy.ops.anim.keyframe_insert_menu(type='BUILTIN_KSI_VisualLocRotScale') ####### # The tool for mixing poses class mixCurrentPose(bpy.types.Operator): \"\"\"Mix-apply the", "= 'VIEW_3D' bl_region_type = \"UI\" if bv28() else \"TOOLS\" # bl_context = \"posemode\"", "= ob.pose_library except: row = layout.row() row.label(text=\"Select an armature for poses\") return layout.template_ID(ob,", "def register(): bpy.types.Scene.posemixinfluence = bpy.props.FloatProperty( name=\"Mix\", description=\"The mix factor between the original pose", "context): ob = context.object prePose = getPose(ob.pose) #get a COPY of the current", "= \"Pose Library Tools\" bl_space_type = 'VIEW_3D' bl_region_type = \"UI\" if bv28() else", "= \"posemode\" bl_category = \"Tool\" if bv28() else 'Tools' def draw(self, context): layout", "value) b.rotation_euler[2] = linmix(b.rotation_euler[2], p[1][2], value) elif p[3] == \"rotation_axis_angle\": b.rotation_axis_angle[0] = linmix(b.rotation_axis_angle[0],", "there eventually\") # brute force copies all location/rotation/scale of all bones and returns", "copy of the GNU General Public License # along with this program; if", "General Public License # along with this program; if not, write to the", "= None # global initialization def bv28(): \"\"\"Check if blender 2.8, for layouts,", "p[2][2], value) if p[3] == \"rotation_quaternion\" or p[3] == '': b.rotation_quaternion[0] = linmix(b.rotation_quaternion[0],", "poses class mixCurrentPose(bpy.types.Operator): \"\"\"Mix-apply the selected library pose on to the current pose\"\"\"", "the stored pose on to the current pose\"\"\" bl_idname = \"poselib.mixedposepaste\" bl_label =", "have the numbers become the poseindex below for builtin def execute(self, context): #get", "is not None: p = col2.operator(\"poselib.mixcurrpose\",text=\"Apply mixed pose\") p.influence = context.scene.posemixinfluence p.pose_index =", "# ob is the object/armature, should get the list of currently selected bones.", "mixed pose\") p.influence = context.scene.posemixinfluence if context.object.pose_library: p.pose_index = context.object.pose_library.pose_markers.active_index col.prop(context.scene, \"posemixinfluence\", slider=True,", "return (context.object and context.object.type == 'ARMATURE' and context.object.mode == 'POSE' ) # in", "list 
def getPose(poseCurr): pose = [] b = bpy.context.selected_pose_bones for a in b:", "# exec not invoke, so menu doesn't need showing pose_marker_active = poselib.pose_markers.active col2", "to .copy() pose.append([a.location.copy(), a.rotation_axis_angle, a.scale.copy(), rotname]) else: pose.append([a.location.copy(), getattr(a,rotname).copy(), a.scale.copy(), rotname]) return pose", "tool, drawn next to the built-in post library tools in armature tab\"\"\" layout", "p[1][0], value) b.rotation_quaternion[1] = linmix(b.rotation_quaternion[1], p[1][1], value) b.rotation_quaternion[2] = linmix(b.rotation_quaternion[2], p[1][2], value) b.rotation_quaternion[3]", "== 'rotation_axis_angle': # it's a list type, so can't/no need to .copy() pose.append([a.location.copy(),", "# frame = int, to bpypass menu add frame of the last un-used", "will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty", "x in range(len(p[2])): #rotation_quaternion, not EULER # b.rotation_quaternion[x] = linmix(b.rotation_quaternion[x], p[2][x], value) if", "\"UI\" if bv28() else \"TOOLS\" # bl_context = \"posemode\" bl_category = \"Tool\" if", "library and clipboard\", \"warning\": \"\", \"wiki_url\": \"https://github.com/TheDuckCow/pose-tools\", \"category\": \"Animation\"} import bpy v =", "row.template_list(\"UI_UL_list\", \"pose_markers\", poselib, \"pose_markers\", poselib.pose_markers, \"active_index\", rows=3) col = row.column(align=True) col.active = (poselib.library", "getPose(ob.pose) # each element is a list of vectors, [loc, rot (quat.), scale,", "\"location\": \"Armature > Pose Library\", \"description\": \"Allows dynamic mixing between poses in library", "text=\"Mix Influence\") class poselibToolshelf(bpy.types.Panel): \"\"\"Post Tools operations\"\"\" bl_label = \"Pose Library Tools\" bl_space_type", "context.object try: poselib = ob.pose_library except: row = layout.row() row.label(text=\"Select an armature for", "= layout.row() row.template_list(\"UI_UL_list\", \"pose_markers\", poselib, \"pose_markers\", poselib.pose_markers, \"active_index\", rows=3) col = row.column(align=True) col.active", "{'FINISHED'} @classmethod def poll(cls, context): return (context.object and context.object.type == 'ARMATURE' and context.object.mode", "pose.append([a.location.copy(), a.rotation_axis_angle, a.scale.copy(), rotname]) else: pose.append([a.location.copy(), getattr(a,rotname).copy(), a.scale.copy(), rotname]) return pose # generic", "default=100) bpy.utils.register_class(mixCurrentPose) bpy.utils.register_class(poselibToolshelf) bpy.utils.register_class(mixedPosePaste) bpy.types.DATA_PT_pose_library.append(pose_tools_panel) def unregister(): bpy.types.DATA_PT_pose_library.remove(pose_tools_panel) bpy.utils.unregister_class(mixedPosePaste) bpy.utils.unregister_class(poselibToolshelf) bpy.utils.unregister_class(mixCurrentPose) del bpy.types.Scene.posemixinfluence", "object mode too (apply to all bones..) class mixedPosePaste(bpy.types.Operator): \"\"\"Mix-paste the stored pose", "EULER # b.rotation_quaternion[x] = linmix(b.rotation_quaternion[x], p[2][x], value) if autoinsert: bpy.ops.anim.keyframe_insert_menu(type='BUILTIN_KSI_VisualLocRotScale') ####### # The", "# make a property here for which pose, like the input one? 
#", "poselib.pose_markers.active col2 = layout.column(align=True) if pose_marker_active is not None: col.operator(\"poselib.pose_remove\", icon=\"ZOOMOUT\" if bpy.app.version", "# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY", "x in range(len(p[1])): #position # b.location[x] =linmix(b.location[x], p[1][x], value) b.location[0] =linmix(b.location[0], p[0][0], value)", "row = layout.row() row.template_list(\"UI_UL_list\", \"pose_markers\", poselib, \"pose_markers\", poselib.pose_markers, \"active_index\", rows=3) col = row.column(align=True)", "3), \"blender\": (2, 80, 0), \"location\": \"Armature > Pose Library\", \"description\": \"Allows dynamic", "b: rotway = a.rotation_mode rotname = '' if rotway in ['QUATERNION']: rotname =", "b.scale[1] = linmix(b.scale[1], p[2][1], value) b.scale[2] = linmix(b.scale[2], p[2][2], value) if p[3] ==", "return pose # generic function for mixing two poses def mixToPose(ob, pose, value):", "get the list of currently selected bones. # frame is the pre-determined frame", "default=100, subtype='PERCENTAGE', unit='NONE', min = 0, max = 100, description=\"influence\" ) def execute(self,", "now, fix later elif rotway in ['XYZ','XZY','YXZ','YZX','ZYX','ZXY']: rotname = \"rotation_euler\" elif rotway in" ]
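# ---------------------------------------------------------------------------
# Illustrative aside (not part of the add-on): the blend performed by
# mixToPose() above is a plain per-channel linear interpolation. Below is a
# minimal standalone sketch of that math which needs no bpy; the names
# prefixed _demo_ are hypothetical and exist only for this sketch, and the
# list layout mirrors the [location, rotation, scale, rotmode] entries that
# getPose() builds. All values are made up for demonstration.
# ---------------------------------------------------------------------------

def _demo_linmix(orig, new, factor):
    # factor=0 keeps the original channel, factor=1 takes the stored one
    return orig * (1 - factor) + new * factor


def _demo_blend_channels(current, stored, factor):
    # Blend each channel of one pose component (e.g. location) independently
    return [_demo_linmix(c, s, factor) for c, s in zip(current, stored)]


def _demo_blend():
    current_loc = [0.0, 0.0, 0.0]
    stored_loc = [1.0, 2.0, 4.0]
    # The operators call mixToPose(ob, prePose, 1 - influence/100), so an
    # influence of 75% maps to factor=0.25 blended back toward the ORIGINAL
    # (pre-apply) pose.
    return _demo_blend_channels(current_loc, stored_loc, 0.25)  # [0.25, 0.5, 1.0]

# Design note: blending quaternion components channel-by-channel, as
# mixToPose does, is a cheap stand-in for a true slerp; it behaves well for
# small deltas but can leave the quaternion slightly non-unit.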
import discord
from discord.ext.commands import Bot
from discord.ext import commands
from discord import Color, Embed
from backend.game_database import GameDatabase
import backend.commands as db


class Automated(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.player_count = 10
        self.required_checks = 8  # Not including bot
        self.required_x = 5  # Not including bot
        self.game_db = GameDatabase()

    # Print out error as embed
    async def __show_error(self, ctx, error_message):
        embed = Embed(
            title="Error!",
            color=Color.red(),
            description=error_message
        )
        await ctx.send(embed=embed)

    @commands.Cog.listener()
    async def on_reaction_add(self, reaction, user):
        msg_id = reaction.message.id
        # Check if message is a pending game
        if msg_id not in self.game_db.pending:
            return
        # Get game id and list of players in the game
        game_id = self.game_db.pending[msg_id]
        player_ids = self.game_db.games[game_id]
        # Remove reaction if user isn't in the game
        if user.id not in player_ids and not user.bot:
            await reaction.message.remove_reaction(reaction.emoji, user)
        # Score game once the check/X reaction threshold is met
        # (reactions[0] is the check, reactions[1] is the X; +1 counts the bot)
        all_reactions = reaction.message.reactions
        if self.required_checks + 1 == all_reactions[0].count \
                or self.required_x + 1 == all_reactions[1].count:
            # Title variable is for embed
            title = "Game Results Canceled!"
            color = Color.red()
            if all_reactions[0].count == self.required_checks + 1:
                title = "Game Results Submitted!"
                color = Color.green()
            # Get info from old embed
            old_embed = reaction.message.embeds[0]
            # Edit embed message
            embed = Embed(
                title=title,
                color=color,
                description=old_embed.description
            )
            for field in old_embed.fields:
                embed.add_field(
                    name=field.name,
                    value=field.value,
                    inline=True
                )
            await reaction.message.edit(embed=embed)
            # Don't score game if it's canceled
            if self.required_x + 1 == all_reactions[1].count:
                self.game_db.reverse_pending(game_id)
                return
            # Remove game from pending games
            self.game_db.remove_pending_game(game_id)
            # To make a call to the scoring database, we need to fill
            # 4 parameters:
            # - [List] of ID's of all players
            # - [List] of display names of all players
            # - [List] of ID's of all impostors
            # - [Boolean] that represents whether crew won or not
            imp_ids, did_imps_win = self.game_db.imps[game_id]
            player_names = []
            for id in player_ids:
                player_names.append(
                    reaction.message.guild.get_member(id).display_name
                )
            # Add game to database
            player_ids = [str(i) for i in player_ids]
            imp_ids = [str(i) for i in imp_ids[0]]
            db.add_game(player_ids, player_names, imp_ids, not did_imps_win)

    @commands.command()
    async def start(self, ctx):
        # Find VC of player who typed command
        member = ctx.author
        voice_state = member.voice
        # Error if not in a VC
        if voice_state is None:
            await self.__show_error(ctx, "You are not in a voice channel!")
            return
        # Get list of all 10 ID's
        voice_channel = voice_state.channel
        member_ids = list(voice_channel.voice_states.keys())
        # Error if VC does not have self.player_count players
        if len(member_ids) != self.player_count:
            error_message = "There are not enough people in your voice channel!"
            await self.__show_error(ctx, error_message)
            return
        # Store in database
        self.game_db.add_game(member_ids)
        # Send embed listing all ids as mentions
        players_str = ""
        number = 1
        for id in member_ids:
            players_str += f"{number}. <@{id}>\n"
            number += 1
        embed = Embed(
            title="Game Setup",
            color=Color.blue(),
        )
        embed.add_field(
            name="Players",
            value=players_str,
            inline=True
        )
        scoring_help = ("Type `!score # # [I/C]`.\n"
                        "The # refers to the imp's\n"
                        "place on the list.")
        embed.add_field(
            name="When finished...",
            value=scoring_help,
            inline=True
        )
        embed.set_footer(text=f"Game ID: {self.game_db.game_number-1}")
        await ctx.send(embed=embed)

    @commands.command()
    async def score(self, ctx):
        # Get message
        content = ctx.message.content.split()[1:]
        # Error if format is incorrect
        if len(content) != 3:
            await self.__show_error(ctx, "Incorrect format!")
            return
        valid_nums = [str(i) for i in range(1, 11)]
        format_checks = [
            content[0] in valid_nums,  # Imps are valid numbers
            content[1] in valid_nums,  # Imps are valid numbers
            content[2].upper() in ["I", "C"],  # Valid game winner
            content[0] != content[1]  # Imps are not the same
        ]
        if not all(format_checks):
            await self.__show_error(ctx, "Incorrect format!")
            return
        # Find game with author
        try:
            author_id = ctx.author.id
            player_ids = self.game_db.get_game(author_id)
            game_id = self.game_db.get_game_id(author_id)
        except:
            # Error if no game found
            await self.__show_error(ctx, "You are not in a game!")
            return
        # Check if game is already being scored
        if self.game_db.is_game_pending(game_id):
            await self.__show_error(ctx, "Game is already being scored!")
            return
        # Description of embed will be based on winner
        if content[2].upper() == "I":
            winner_str = "`Impostors Win!`"
        else:
            winner_str = "`Crewmates Win!`"
        # Print out game and wait for the required check reactions
        embed = Embed(
            title="Pending Game Results!",
            description=winner_str,
            color=Color.blue()
        )
        # crew_str = "1. Dale\n2. Peter\n3. Steve"
        # crew_counter = 1
        # imp_str = "1. John\n2. David"
        # imp_counter = 1
        crew_str = imp_str = ""
        crew_counter = imp_counter = 1
        for i in range(len(player_ids)):
            if i + 1 != int(content[0]) and i + 1 != int(content[1]):
                crew_str += f"{crew_counter}. <@{player_ids[i]}>\n"
                crew_counter += 1
            else:
                imp_str += f"{imp_counter}. <@{player_ids[i]}>\n"
                imp_counter += 1
        embed.add_field(
            name="Crewmates",
            value=crew_str,
            inline=True
        )
        embed.add_field(
            name="Impostors",
            value=imp_str,
            inline=True
        )
        embed.set_footer(text=f"{self.required_checks} reactions required.")
        pending_msg = await ctx.send(embed=embed)
        # React to the message with the check and X emoji
        await pending_msg.add_reaction("\U00002705")
        await pending_msg.add_reaction("\U0000274C")
        # Give embed message id to game database
        imp_ids = [player_ids[int(content[0]) - 1], player_ids[int(content[1]) - 1]]
        self.game_db.add_pending_game(
            pending_msg.id,
            game_id,
            [imp_ids],
            content[2].upper() == "C"
        )

    # @commands.command()
    # async def void(self, ctx):
    #     desc = "Void"
    #
    #     embed = Embed(
    #         title="Void",
    #         color=Color.dark_gray(),
    #         description=desc
    #     )
    #
    #     await ctx.send(embed=embed)


def setup(bot):
    # Standard discord.py extension entry point
    bot.add_cog(Automated(bot))
<@{player_ids[i]}>\\n\" crew_counter += 1 else: imp_str += f\"{imp_counter}.", "discord.ext import commands from discord import Color, Embed from backend.game_database import GameDatabase import", ") embed.add_field( name=\"Impostors\", value=imp_str, inline=True ) embed.set_footer(text=f\"{self.required_checks} reactions required.\") pending_msg = await ctx.send(embed=embed)", "in [\"I\", \"C\"], # Valid game winner content[0] != content[1] # Imps are", "for id in player_ids: player_names.append( reaction.message.guild.get_member(id).display_name ) # Add game to database player_ids", "game!\") return # Check if game is already being scored if self.game_db.is_game_pending(game_id): await", "return valid_nums = [str(i) for i in range(1, 11)] format_checks = [ content[0]", "ctx, error_message): embed = Embed( title=\"Error!\", color=Color.red(), description=error_message ) await ctx.send(embed=embed) @commands.Cog.listener() async", "[] for id in player_ids: player_names.append( reaction.message.guild.get_member(id).display_name ) # Add game to database", "try: author_id = ctx.author.id player_ids = self.game_db.get_game(author_id) game_id = self.game_db.get_game_id(author_id) except: # Error", "or self.required_x+1 == all_reactions[1].count: # Title variable is for embed title = \"Game", "valid_nums = [str(i) for i in range(1, 11)] format_checks = [ content[0] in", "players if len(member_ids) != self.player_count: error_message = \"There are not enough people in", "if user isn't in the game if not user.id in player_ids and not", "valid numbers content[1] in valid_nums, # Imps are valid numbers content[2].upper() in [\"I\",", "for i in range(1, 11)] format_checks = [ content[0] in valid_nums, # Imps", "game is already being scored if self.game_db.is_game_pending(game_id): await self.__show_error(ctx, \"Game is already being", "games self.game_db.remove_pending_game(game_id) # To make a call to the scoring database, we need", "call to the scoring database, we need to fill # 4 parameters: #", "in player_ids and not user.bot: await reaction.message.remove_reaction(reaction.emoji, user) # Score game if self.required_reactions", "import Color, Embed from backend.game_database import GameDatabase import backend.commands as db class Automated(commands.Cog):", ") embed.add_field( name=\"Players\", value=players_str, inline=True ) scoring_help = \"Type `!score # # [I/C]`.\\n\\", "player_ids] imp_ids = [str(i) for i in imp_ids[0]] db.add_game(player_ids, player_names, imp_ids, not did_imps_win)", "# Remove game from pending games self.game_db.remove_pending_game(game_id) # To make a call to", "reactions embed = Embed( title=\"Pending Game Results!\", description=winner_str, color=Color.blue() ) # crew_str =", "# Find VC of player who typed command member = ctx.author voice_state =", "embed.add_field( name=\"Impostors\", value=imp_str, inline=True ) embed.set_footer(text=f\"{self.required_checks} reactions required.\") pending_msg = await ctx.send(embed=embed) #", "have self.player_count players if len(member_ids) != self.player_count: error_message = \"There are not enough", "# Find game with author try: author_id = ctx.author.id player_ids = self.game_db.get_game(author_id) game_id", "crew won or not imp_ids, did_imps_win = self.game_db.imps[game_id] player_names = [] for id", "= \"1. John\\n2. 
David\" # imp_counter = 1 crew_str = imp_str = \"\"", "that represents whether crew won or not imp_ids, did_imps_win = self.game_db.imps[game_id] player_names =", "self.game_db.add_pending_game( pending_msg.id, game_id, [imp_ids], content[2].upper() == \"C\" ) # @commands.command() # async def", "3. Steve\" # crew_counter = 1 # imp_str = \"1. John\\n2. David\" #", "await self.__show_error(ctx, \"Incorrect format!\") return # Find game with author try: author_id =", "Imps are valid numbers content[2].upper() in [\"I\", \"C\"], # Valid game winner content[0]", "= ctx.author.id player_ids = self.game_db.get_game(author_id) game_id = self.game_db.get_game_id(author_id) except: # Error if no", "valid numbers content[2].upper() in [\"I\", \"C\"], # Valid game winner content[0] != content[1]", "to the imp's\\n\\ place on the list.\" embed.add_field( name=\"When finished...\", value=scoring_help, inline=True )", "Get message content = ctx.message.content.split()[1:] # Error if format is incorrect if len(content)", "# Description of embed will be based on winner if content[2].upper() == \"I\":", "mentions players_str = \"\" number = 1 for id in member_ids: players_str +=", "self.required_x+1 == all_reactions[1].count: # Title variable is for embed title = \"Game Results", "@commands.Cog.listener() async def on_reaction_add(self, reaction, user): msg_id = reaction.message.id # Check if message", "if all_reactions[0].count == self.required_checks+1: title = \"Game Results Submitted!\" color = Color.green() #", "# Imps are valid numbers content[1] in valid_nums, # Imps are valid numbers", "Error if format is incorrect if len(content) != 3: await self.__show_error(ctx, \"Incorrect format!\")", "winner_str = \"`Impostors Win!`\" else: winner_str = \"`Crewmates Win!`\" # Print out game", "database imp_ids = [player_ids[int(content[0])-1], player_ids[int(content[1])-1]] self.game_db.add_pending_game( pending_msg.id, game_id, [imp_ids], content[2].upper() == \"C\" )", "make a call to the scoring database, we need to fill # 4", "Results Submitted!\" color = Color.green() # Get info from old embed old_embed =", "= ctx.message.content.split()[1:] # Error if format is incorrect if len(content) != 3: await", "and list of players in the game game_id = self.game_db.pending[msg_id] player_ids = self.game_db.games[game_id]", "database player_ids = [str(i) for i in player_ids] imp_ids = [str(i) for i", "!= content[1] # Imps are not the same ] if not all(format_checks): await", "content[2].upper() in [\"I\", \"C\"], # Valid game winner content[0] != content[1] # Imps", "ctx.send(embed=embed) # React to the message with the check and X Emoji await", "your voice channel!\" await self.__show_error(ctx, error_message) return # Store in database self.game_db.add_game(member_ids) #", "players_str += f\"{number}. 
<@{id}>\\n\" number += 1 embed = Embed( title=\"Game Setup\", color=Color.blue(),", "if self.required_checks+1 == all_reactions[0].count \\ or self.required_x+1 == all_reactions[1].count: # Title variable is", "value=scoring_help, inline=True ) embed.set_footer(text=f\"Game ID: {self.game_db.game_number-1}\") await ctx.send(embed=embed) @commands.command() async def score(self, ctx):", "embed.add_field( name=field.name, value=field.value, inline=True ) await reaction.message.edit(embed=embed) # Don't score game if it's", "name=\"Impostors\", value=imp_str, inline=True ) embed.set_footer(text=f\"{self.required_checks} reactions required.\") pending_msg = await ctx.send(embed=embed) # React", "Remove reaction if user isn't in the game if not user.id in player_ids", "<@{id}>\\n\" number += 1 embed = Embed( title=\"Game Setup\", color=Color.blue(), ) embed.add_field( name=\"Players\",", "if no game found await self.__show_error(ctx, \"You are not in a game!\") return", "start(self, ctx): # Find VC of player who typed command member = ctx.author", "len(member_ids) != self.player_count: error_message = \"There are not enough people in your voice", "the check and X Emoji await pending_msg.add_reaction(\"\\U00002705\") await pending_msg.add_reaction(\"\\U0000274C\") # Give embed message", "command member = ctx.author voice_state = member.voice # Error if not in a", "self.__show_error(ctx, \"Game is already being scored!\") return # Description of embed will be", "# Error if not in a VC if voice_state is None: await self.__show_error(ctx,", "are not in a voice channel!\") return # Get list of all 10", "message content = ctx.message.content.split()[1:] # Error if format is incorrect if len(content) !=", "if content[2].upper() == \"I\": winner_str = \"`Impostors Win!`\" else: winner_str = \"`Crewmates Win!`\"", "found await self.__show_error(ctx, \"You are not in a game!\") return # Check if", "async def start(self, ctx): # Find VC of player who typed command member", "player_ids and not user.bot: await reaction.message.remove_reaction(reaction.emoji, user) # Score game if self.required_reactions threshold", "= imp_counter = 1 for i in range(len(player_ids)): if i+1 != int(content[0]) and", "it's canceled if self.required_x+1 == all_reactions[1].count: self.game_db.reverse_pending(game_id) return # Remove game from pending", "1 embed = Embed( title=\"Game Setup\", color=Color.blue(), ) embed.add_field( name=\"Players\", value=players_str, inline=True )", "as db class Automated(commands.Cog): def __init__(self, bot): self.bot = bot self.player_count = 10", "imp_ids, not did_imps_win) @commands.command() async def start(self, ctx): # Find VC of player", "list of players in the game game_id = self.game_db.pending[msg_id] player_ids = self.game_db.games[game_id] #", "{self.game_db.game_number-1}\") await ctx.send(embed=embed) @commands.command() async def score(self, ctx): # Get message content =", "required.\") pending_msg = await ctx.send(embed=embed) # React to the message with the check", "def on_reaction_add(self, reaction, user): msg_id = reaction.message.id # Check if message is a", "not have self.player_count players if len(member_ids) != self.player_count: error_message = \"There are not", "color=Color.blue(), ) embed.add_field( name=\"Players\", value=players_str, inline=True ) scoring_help = \"Type `!score # #", ") # crew_str = \"1. Dale\\n2. Peter\\n 3. 
Steve\" # crew_counter = 1", "error as embed async def __show_error(self, ctx, error_message): embed = Embed( title=\"Error!\", color=Color.red(),", "in a voice channel!\") return # Get list of all 10 ID's voice_channel", "or not imp_ids, did_imps_win = self.game_db.imps[game_id] player_names = [] for id in player_ids:", "ID: {self.game_db.game_number-1}\") await ctx.send(embed=embed) @commands.command() async def score(self, ctx): # Get message content", "info from old embed old_embed = reaction.message.embeds[0] # Edit embed message embed =", "= reaction.message.embeds[0] # Edit embed message embed = Embed( title=title, color=color, description=old_embed.description )", "if format is incorrect if len(content) != 3: await self.__show_error(ctx, \"Incorrect format!\") return", "embed.add_field( name=\"Players\", value=players_str, inline=True ) scoring_help = \"Type `!score # # [I/C]`.\\n\\ The", "# crew_counter = 1 # imp_str = \"1. John\\n2. David\" # imp_counter =", "<gh_stars>0 import asyncio import discord from discord.ext.commands import Bot from discord.ext import commands", "== all_reactions[0].count \\ or self.required_x+1 == all_reactions[1].count: # Title variable is for embed", "are not enough people in your voice channel!\" await self.__show_error(ctx, error_message) return #", "] if not all(format_checks): await self.__show_error(ctx, \"Incorrect format!\") return # Find game with", "i+1 != int(content[0]) and i+1 != int(content[1]): crew_str += f\"{crew_counter}. <@{player_ids[i]}>\\n\" crew_counter +=", "message with the check and X Emoji await pending_msg.add_reaction(\"\\U00002705\") await pending_msg.add_reaction(\"\\U0000274C\") # Give", "- [List] of ID's of all impostors # - [Boolean] that represents whether", "user): msg_id = reaction.message.id # Check if message is a pending game if", "variable is for embed title = \"Game Results Canceled!\" color = Color.red() if", "self.game_db = GameDatabase() # Print out error as embed async def __show_error(self, ctx,", "all_reactions[1].count: # Title variable is for embed title = \"Game Results Canceled!\" color", "pending game if not msg_id in self.game_db.pending: return # Get game id and", "game_id = self.game_db.get_game_id(author_id) except: # Error if no game found await self.__show_error(ctx, \"You", "user isn't in the game if not user.id in player_ids and not user.bot:", "does not have self.player_count players if len(member_ids) != self.player_count: error_message = \"There are", "void(self, ctx): # desc = \"Void\" # # embed = Embed( # title=\"Void\",", "a call to the scoring database, we need to fill # 4 parameters:", "__init__(self, bot): self.bot = bot self.player_count = 10 self.required_checks = 8 # Not", "format_checks = [ content[0] in valid_nums, # Imps are valid numbers content[1] in", "of display names of all players # - [List] of ID's of all", "game from pending games self.game_db.remove_pending_game(game_id) # To make a call to the scoring", "pending_msg = await ctx.send(embed=embed) # React to the message with the check and", "Color, Embed from backend.game_database import GameDatabase import backend.commands as db class Automated(commands.Cog): def", "= [str(i) for i in range(1, 11)] format_checks = [ content[0] in valid_nums,", "@commands.command() async def score(self, ctx): # Get message content = ctx.message.content.split()[1:] # Error", "else: imp_str += f\"{imp_counter}. 
<@{player_ids[i]}>\\n\" imp_counter += 1 embed.add_field( name=\"Crewmates\", value=crew_str, inline=True )", "\"Incorrect format!\") return # Find game with author try: author_id = ctx.author.id player_ids", "# - [List] of display names of all players # - [List] of", "# Valid game winner content[0] != content[1] # Imps are not the same", "= self.game_db.get_game_id(author_id) except: # Error if no game found await self.__show_error(ctx, \"You are", "import asyncio import discord from discord.ext.commands import Bot from discord.ext import commands from", "id and list of players in the game game_id = self.game_db.pending[msg_id] player_ids =", "# Get list of all 10 ID's voice_channel = voice_state.channel member_ids = list(voice_channel.voice_states.keys())", "game_id, [imp_ids], content[2].upper() == \"C\" ) # @commands.command() # async def void(self, ctx):", "bot): self.bot = bot self.player_count = 10 self.required_checks = 8 # Not including", "Dale\\n2. Peter\\n 3. Steve\" # crew_counter = 1 # imp_str = \"1. John\\n2.", "imp_counter = 1 for i in range(len(player_ids)): if i+1 != int(content[0]) and i+1", "title = \"Game Results Submitted!\" color = Color.green() # Get info from old", "# Get game id and list of players in the game game_id =", "# Add game to database player_ids = [str(i) for i in player_ids] imp_ids", "embed listing all ids as mentions players_str = \"\" number = 1 for", "!= int(content[0]) and i+1 != int(content[1]): crew_str += f\"{crew_counter}. <@{player_ids[i]}>\\n\" crew_counter += 1", "= \"Game Results Submitted!\" color = Color.green() # Get info from old embed", "= \"1. Dale\\n2. Peter\\n 3. Steve\" # crew_counter = 1 # imp_str =", "numbers content[2].upper() in [\"I\", \"C\"], # Valid game winner content[0] != content[1] #", "name=field.name, value=field.value, inline=True ) await reaction.message.edit(embed=embed) # Don't score game if it's canceled", "Error if VC does not have self.player_count players if len(member_ids) != self.player_count: error_message", "title=\"Pending Game Results!\", description=winner_str, color=Color.blue() ) # crew_str = \"1. Dale\\n2. 
Peter\\n 3.", "need to fill # 4 parameters: # - [List] of ID's of all", "self.required_x = 5 # Not including bot self.game_db = GameDatabase() # Print out", "for i in imp_ids[0]] db.add_game(player_ids, player_names, imp_ids, not did_imps_win) @commands.command() async def start(self,", "not msg_id in self.game_db.pending: return # Get game id and list of players", "<@{player_ids[i]}>\\n\" imp_counter += 1 embed.add_field( name=\"Crewmates\", value=crew_str, inline=True ) embed.add_field( name=\"Impostors\", value=imp_str, inline=True", "= \"\" crew_counter = imp_counter = 1 for i in range(len(player_ids)): if i+1", "embed = Embed( title=title, color=color, description=old_embed.description ) for field in old_embed.fields: embed.add_field( name=field.name,", "member = ctx.author voice_state = member.voice # Error if not in a VC", "as embed async def __show_error(self, ctx, error_message): embed = Embed( title=\"Error!\", color=Color.red(), description=error_message", "= \"Type `!score # # [I/C]`.\\n\\ The # refers to the imp's\\n\\ place", "return # Check if game is already being scored if self.game_db.is_game_pending(game_id): await self.__show_error(ctx,", "will be based on winner if content[2].upper() == \"I\": winner_str = \"`Impostors Win!`\"", "Print out game and wait for 6 reactions embed = Embed( title=\"Pending Game", "Check if message is a pending game if not msg_id in self.game_db.pending: return", "[List] of display names of all players # - [List] of ID's of", "= voice_state.channel member_ids = list(voice_channel.voice_states.keys()) # Error if VC does not have self.player_count", "Send embed listing all ids as mentions players_str = \"\" number = 1", "\"C\"], # Valid game winner content[0] != content[1] # Imps are not the", "no game found await self.__show_error(ctx, \"You are not in a game!\") return #", "voice_state = member.voice # Error if not in a VC if voice_state is", "= self.game_db.pending[msg_id] player_ids = self.game_db.games[game_id] # Remove reaction if user isn't in the", "# Score game if self.required_reactions threshold is met all_reactions = reaction.message.reactions if self.required_checks+1", "[ content[0] in valid_nums, # Imps are valid numbers content[1] in valid_nums, #", "Results Canceled!\" color = Color.red() if all_reactions[0].count == self.required_checks+1: title = \"Game Results", "[str(i) for i in imp_ids[0]] db.add_game(player_ids, player_names, imp_ids, not did_imps_win) @commands.command() async def", "not did_imps_win) @commands.command() async def start(self, ctx): # Find VC of player who", "out game and wait for 6 reactions embed = Embed( title=\"Pending Game Results!\",", "# Don't score game if it's canceled if self.required_x+1 == all_reactions[1].count: self.game_db.reverse_pending(game_id) return", "and wait for 6 reactions embed = Embed( title=\"Pending Game Results!\", description=winner_str, color=Color.blue()", "the message with the check and X Emoji await pending_msg.add_reaction(\"\\U00002705\") await pending_msg.add_reaction(\"\\U0000274C\") #", "ctx): # Find VC of player who typed command member = ctx.author voice_state", "\"`Impostors Win!`\" else: winner_str = \"`Crewmates Win!`\" # Print out game and wait", "msg_id = reaction.message.id # Check if message is a pending game if not", "# Not including bot self.game_db = GameDatabase() # Print out error as embed", "Find game with author try: author_id = ctx.author.id player_ids = self.game_db.get_game(author_id) game_id =", "from discord import Color, Embed from 
backend.game_database import GameDatabase import backend.commands as db", "being scored if self.game_db.is_game_pending(game_id): await self.__show_error(ctx, \"Game is already being scored!\") return #", "Imps are not the same ] if not all(format_checks): await self.__show_error(ctx, \"Incorrect format!\")", "who typed command member = ctx.author voice_state = member.voice # Error if not", "being scored!\") return # Description of embed will be based on winner if", "player_ids = self.game_db.games[game_id] # Remove reaction if user isn't in the game if", "Get list of all 10 ID's voice_channel = voice_state.channel member_ids = list(voice_channel.voice_states.keys()) #", "value=crew_str, inline=True ) embed.add_field( name=\"Impostors\", value=imp_str, inline=True ) embed.set_footer(text=f\"{self.required_checks} reactions required.\") pending_msg =", "a game!\") return # Check if game is already being scored if self.game_db.is_game_pending(game_id):", "if game is already being scored if self.game_db.is_game_pending(game_id): await self.__show_error(ctx, \"Game is already", "in your voice channel!\" await self.__show_error(ctx, error_message) return # Store in database self.game_db.add_game(member_ids)", "name=\"Players\", value=players_str, inline=True ) scoring_help = \"Type `!score # # [I/C]`.\\n\\ The #", "= Embed( title=\"Game Setup\", color=Color.blue(), ) embed.add_field( name=\"Players\", value=players_str, inline=True ) scoring_help =", "enough people in your voice channel!\" await self.__show_error(ctx, error_message) return # Store in", "reaction.message.remove_reaction(reaction.emoji, user) # Score game if self.required_reactions threshold is met all_reactions = reaction.message.reactions", "8 # Not including bot self.required_x = 5 # Not including bot self.game_db", "and not user.bot: await reaction.message.remove_reaction(reaction.emoji, user) # Score game if self.required_reactions threshold is", "Title variable is for embed title = \"Game Results Canceled!\" color = Color.red()", "def score(self, ctx): # Get message content = ctx.message.content.split()[1:] # Error if format", "3: await self.__show_error(ctx, \"Incorrect format!\") return valid_nums = [str(i) for i in range(1,", "self.required_checks+1: title = \"Game Results Submitted!\" color = Color.green() # Get info from", "value=imp_str, inline=True ) embed.set_footer(text=f\"{self.required_checks} reactions required.\") pending_msg = await ctx.send(embed=embed) # React to", "already being scored!\") return # Description of embed will be based on winner", "all(format_checks): await self.__show_error(ctx, \"Incorrect format!\") return # Find game with author try: author_id", "# async def void(self, ctx): # desc = \"Void\" # # embed =", "= [str(i) for i in imp_ids[0]] db.add_game(player_ids, player_names, imp_ids, not did_imps_win) @commands.command() async", "the list.\" embed.add_field( name=\"When finished...\", value=scoring_help, inline=True ) embed.set_footer(text=f\"Game ID: {self.game_db.game_number-1}\") await ctx.send(embed=embed)", "= \"Void\" # # embed = Embed( # title=\"Void\", # color=Color.dark_gray(), # description=desc", "self.game_db.remove_pending_game(game_id) # To make a call to the scoring database, we need to", "await self.__show_error(ctx, \"Incorrect format!\") return valid_nums = [str(i) for i in range(1, 11)]", "value=field.value, inline=True ) await reaction.message.edit(embed=embed) # Don't score game if it's canceled if", "# - [Boolean] that represents whether crew won or not imp_ids, 
did_imps_win =", "as mentions players_str = \"\" number = 1 for id in member_ids: players_str", "winner if content[2].upper() == \"I\": winner_str = \"`Impostors Win!`\" else: winner_str = \"`Crewmates", "= 8 # Not including bot self.required_x = 5 # Not including bot", "async def __show_error(self, ctx, error_message): embed = Embed( title=\"Error!\", color=Color.red(), description=error_message ) await", "author_id = ctx.author.id player_ids = self.game_db.get_game(author_id) game_id = self.game_db.get_game_id(author_id) except: # Error if", "Automated(commands.Cog): def __init__(self, bot): self.bot = bot self.player_count = 10 self.required_checks = 8", "# Not including bot self.required_x = 5 # Not including bot self.game_db =", "from discord.ext.commands import Bot from discord.ext import commands from discord import Color, Embed", "in old_embed.fields: embed.add_field( name=field.name, value=field.value, inline=True ) await reaction.message.edit(embed=embed) # Don't score game", "i in player_ids] imp_ids = [str(i) for i in imp_ids[0]] db.add_game(player_ids, player_names, imp_ids,", "if len(member_ids) != self.player_count: error_message = \"There are not enough people in your", "refers to the imp's\\n\\ place on the list.\" embed.add_field( name=\"When finished...\", value=scoring_help, inline=True", "are not the same ] if not all(format_checks): await self.__show_error(ctx, \"Incorrect format!\") return", "Results!\", description=winner_str, color=Color.blue() ) # crew_str = \"1. Dale\\n2. Peter\\n 3. Steve\" #", "imp_str += f\"{imp_counter}. <@{player_ids[i]}>\\n\" imp_counter += 1 embed.add_field( name=\"Crewmates\", value=crew_str, inline=True ) embed.add_field(", "VC of player who typed command member = ctx.author voice_state = member.voice #", "voice_channel = voice_state.channel member_ids = list(voice_channel.voice_states.keys()) # Error if VC does not have", "not imp_ids, did_imps_win = self.game_db.imps[game_id] player_names = [] for id in player_ids: player_names.append(", "backend.commands as db class Automated(commands.Cog): def __init__(self, bot): self.bot = bot self.player_count =", "import Bot from discord.ext import commands from discord import Color, Embed from backend.game_database", "reaction.message.reactions if self.required_checks+1 == all_reactions[0].count \\ or self.required_x+1 == all_reactions[1].count: # Title variable", "await reaction.message.remove_reaction(reaction.emoji, user) # Score game if self.required_reactions threshold is met all_reactions =", "range(len(player_ids)): if i+1 != int(content[0]) and i+1 != int(content[1]): crew_str += f\"{crew_counter}. 
<@{player_ids[i]}>\\n\"", "== all_reactions[1].count: self.game_db.reverse_pending(game_id) return # Remove game from pending games self.game_db.remove_pending_game(game_id) # To", "self.required_checks = 8 # Not including bot self.required_x = 5 # Not including", "= reaction.message.id # Check if message is a pending game if not msg_id", "crew_counter = imp_counter = 1 for i in range(len(player_ids)): if i+1 != int(content[0])", "# To make a call to the scoring database, we need to fill", "game to database player_ids = [str(i) for i in player_ids] imp_ids = [str(i)", "game game_id = self.game_db.pending[msg_id] player_ids = self.game_db.games[game_id] # Remove reaction if user isn't", "threshold is met all_reactions = reaction.message.reactions if self.required_checks+1 == all_reactions[0].count \\ or self.required_x+1", "embed = Embed( title=\"Error!\", color=Color.red(), description=error_message ) await ctx.send(embed=embed) @commands.Cog.listener() async def on_reaction_add(self,", "!= self.player_count: error_message = \"There are not enough people in your voice channel!\"", "in self.game_db.pending: return # Get game id and list of players in the", "channel!\") return # Get list of all 10 ID's voice_channel = voice_state.channel member_ids", "= Embed( title=\"Pending Game Results!\", description=winner_str, color=Color.blue() ) # crew_str = \"1. Dale\\n2.", "content[2].upper() == \"I\": winner_str = \"`Impostors Win!`\" else: winner_str = \"`Crewmates Win!`\" #", "Get info from old embed old_embed = reaction.message.embeds[0] # Edit embed message embed", "did_imps_win) @commands.command() async def start(self, ctx): # Find VC of player who typed", "# color=Color.dark_gray(), # description=desc # ) # # await ctx.send(embed=embed) def setup(bot): bot.add_cog(Automated(bot))", "is already being scored!\") return # Description of embed will be based on", "Check if game is already being scored if self.game_db.is_game_pending(game_id): await self.__show_error(ctx, \"Game is", "if self.game_db.is_game_pending(game_id): await self.__show_error(ctx, \"Game is already being scored!\") return # Description of", "not in a voice channel!\") return # Get list of all 10 ID's", "# desc = \"Void\" # # embed = Embed( # title=\"Void\", # color=Color.dark_gray(),", "typed command member = ctx.author voice_state = member.voice # Error if not in", "in a VC if voice_state is None: await self.__show_error(ctx, \"You are not in", "crew_counter += 1 else: imp_str += f\"{imp_counter}. <@{player_ids[i]}>\\n\" imp_counter += 1 embed.add_field( name=\"Crewmates\",", "if message is a pending game if not msg_id in self.game_db.pending: return #", "in member_ids: players_str += f\"{number}. <@{id}>\\n\" number += 1 embed = Embed( title=\"Game", "Steve\" # crew_counter = 1 # imp_str = \"1. John\\n2. David\" # imp_counter", "title=title, color=color, description=old_embed.description ) for field in old_embed.fields: embed.add_field( name=field.name, value=field.value, inline=True )", "is already being scored if self.game_db.is_game_pending(game_id): await self.__show_error(ctx, \"Game is already being scored!\")", "Description of embed will be based on winner if content[2].upper() == \"I\": winner_str", "6 reactions embed = Embed( title=\"Pending Game Results!\", description=winner_str, color=Color.blue() ) # crew_str", "\"1. Dale\\n2. Peter\\n 3. 
Steve\" # crew_counter = 1 # imp_str = \"1.", "all 10 ID's voice_channel = voice_state.channel member_ids = list(voice_channel.voice_states.keys()) # Error if VC", "5 # Not including bot self.game_db = GameDatabase() # Print out error as", "\"1. John\\n2. David\" # imp_counter = 1 crew_str = imp_str = \"\" crew_counter", "old embed old_embed = reaction.message.embeds[0] # Edit embed message embed = Embed( title=title,", "f\"{crew_counter}. <@{player_ids[i]}>\\n\" crew_counter += 1 else: imp_str += f\"{imp_counter}. <@{player_ids[i]}>\\n\" imp_counter += 1", "the scoring database, we need to fill # 4 parameters: # - [List]", "member.voice # Error if not in a VC if voice_state is None: await", "same ] if not all(format_checks): await self.__show_error(ctx, \"Incorrect format!\") return # Find game", "database self.game_db.add_game(member_ids) # Send embed listing all ids as mentions players_str = \"\"", "11)] format_checks = [ content[0] in valid_nums, # Imps are valid numbers content[1]", "for field in old_embed.fields: embed.add_field( name=field.name, value=field.value, inline=True ) await reaction.message.edit(embed=embed) # Don't", "# Print out error as embed async def __show_error(self, ctx, error_message): embed =", "# Imps are not the same ] if not all(format_checks): await self.__show_error(ctx, \"Incorrect", "of all players # - [List] of display names of all players #", "self.game_db.reverse_pending(game_id) return # Remove game from pending games self.game_db.remove_pending_game(game_id) # To make a", "field in old_embed.fields: embed.add_field( name=field.name, value=field.value, inline=True ) await reaction.message.edit(embed=embed) # Don't score", "old_embed.fields: embed.add_field( name=field.name, value=field.value, inline=True ) await reaction.message.edit(embed=embed) # Don't score game if", "except: # Error if no game found await self.__show_error(ctx, \"You are not in", "players_str = \"\" number = 1 for id in member_ids: players_str += f\"{number}.", "name=\"When finished...\", value=scoring_help, inline=True ) embed.set_footer(text=f\"Game ID: {self.game_db.game_number-1}\") await ctx.send(embed=embed) @commands.command() async def", "Edit embed message embed = Embed( title=title, color=color, description=old_embed.description ) for field in", "asyncio import discord from discord.ext.commands import Bot from discord.ext import commands from discord", "player_ids = self.game_db.get_game(author_id) game_id = self.game_db.get_game_id(author_id) except: # Error if no game found", "int(content[1]): crew_str += f\"{crew_counter}. <@{player_ids[i]}>\\n\" crew_counter += 1 else: imp_str += f\"{imp_counter}. 
<@{player_ids[i]}>\\n\"", "# 4 parameters: # - [List] of ID's of all players # -", "error_message = \"There are not enough people in your voice channel!\" await self.__show_error(ctx,", "to the scoring database, we need to fill # 4 parameters: # -", "Store in database self.game_db.add_game(member_ids) # Send embed listing all ids as mentions players_str", "including bot self.required_x = 5 # Not including bot self.game_db = GameDatabase() #", "= Embed( # title=\"Void\", # color=Color.dark_gray(), # description=desc # ) # # await", "= self.game_db.imps[game_id] player_names = [] for id in player_ids: player_names.append( reaction.message.guild.get_member(id).display_name ) #", "\"You are not in a voice channel!\") return # Get list of all", "title = \"Game Results Canceled!\" color = Color.red() if all_reactions[0].count == self.required_checks+1: title", "= [str(i) for i in player_ids] imp_ids = [str(i) for i in imp_ids[0]]", "met all_reactions = reaction.message.reactions if self.required_checks+1 == all_reactions[0].count \\ or self.required_x+1 == all_reactions[1].count:", "embed title = \"Game Results Canceled!\" color = Color.red() if all_reactions[0].count == self.required_checks+1:", "represents whether crew won or not imp_ids, did_imps_win = self.game_db.imps[game_id] player_names = []", "await ctx.send(embed=embed) # React to the message with the check and X Emoji", "game if self.required_reactions threshold is met all_reactions = reaction.message.reactions if self.required_checks+1 == all_reactions[0].count", "\"Void\" # # embed = Embed( # title=\"Void\", # color=Color.dark_gray(), # description=desc #", "Error if no game found await self.__show_error(ctx, \"You are not in a game!\")", "Color.red() if all_reactions[0].count == self.required_checks+1: title = \"Game Results Submitted!\" color = Color.green()", "ctx.send(embed=embed) @commands.command() async def score(self, ctx): # Get message content = ctx.message.content.split()[1:] #", "imp's\\n\\ place on the list.\" embed.add_field( name=\"When finished...\", value=scoring_help, inline=True ) embed.set_footer(text=f\"Game ID:", "with the check and X Emoji await pending_msg.add_reaction(\"\\U00002705\") await pending_msg.add_reaction(\"\\U0000274C\") # Give embed", "is None: await self.__show_error(ctx, \"You are not in a voice channel!\") return #", "Embed( title=\"Pending Game Results!\", description=winner_str, color=Color.blue() ) # crew_str = \"1. Dale\\n2. Peter\\n", "= Color.red() if all_reactions[0].count == self.required_checks+1: title = \"Game Results Submitted!\" color =", "backend.game_database import GameDatabase import backend.commands as db class Automated(commands.Cog): def __init__(self, bot): self.bot", "did_imps_win = self.game_db.imps[game_id] player_names = [] for id in player_ids: player_names.append( reaction.message.guild.get_member(id).display_name )", "return # Get list of all 10 ID's voice_channel = voice_state.channel member_ids =", "old_embed = reaction.message.embeds[0] # Edit embed message embed = Embed( title=title, color=color, description=old_embed.description", "not all(format_checks): await self.__show_error(ctx, \"Incorrect format!\") return # Find game with author try:", "color=Color.blue() ) # crew_str = \"1. Dale\\n2. Peter\\n 3. 
Steve\" # crew_counter =", "in the game game_id = self.game_db.pending[msg_id] player_ids = self.game_db.games[game_id] # Remove reaction if", "return # Description of embed will be based on winner if content[2].upper() ==", "db class Automated(commands.Cog): def __init__(self, bot): self.bot = bot self.player_count = 10 self.required_checks", "not in a VC if voice_state is None: await self.__show_error(ctx, \"You are not", "of ID's of all impostors # - [Boolean] that represents whether crew won", "ctx): # desc = \"Void\" # # embed = Embed( # title=\"Void\", #", "\"Game is already being scored!\") return # Description of embed will be based", "Peter\\n 3. Steve\" # crew_counter = 1 # imp_str = \"1. John\\n2. David\"", "1 crew_str = imp_str = \"\" crew_counter = imp_counter = 1 for i", "inline=True ) await reaction.message.edit(embed=embed) # Don't score game if it's canceled if self.required_x+1", "= 1 crew_str = imp_str = \"\" crew_counter = imp_counter = 1 for", "description=old_embed.description ) for field in old_embed.fields: embed.add_field( name=field.name, value=field.value, inline=True ) await reaction.message.edit(embed=embed)", "if not all(format_checks): await self.__show_error(ctx, \"Incorrect format!\") return # Find game with author", "of players in the game game_id = self.game_db.pending[msg_id] player_ids = self.game_db.games[game_id] # Remove", "voice_state is None: await self.__show_error(ctx, \"You are not in a voice channel!\") return", "isn't in the game if not user.id in player_ids and not user.bot: await", "whether crew won or not imp_ids, did_imps_win = self.game_db.imps[game_id] player_names = [] for", ") # Add game to database player_ids = [str(i) for i in player_ids]", "# @commands.command() # async def void(self, ctx): # desc = \"Void\" # #", "__show_error(self, ctx, error_message): embed = Embed( title=\"Error!\", color=Color.red(), description=error_message ) await ctx.send(embed=embed) @commands.Cog.listener()", "db.add_game(player_ids, player_names, imp_ids, not did_imps_win) @commands.command() async def start(self, ctx): # Find VC", "the same ] if not all(format_checks): await self.__show_error(ctx, \"Incorrect format!\") return # Find", "finished...\", value=scoring_help, inline=True ) embed.set_footer(text=f\"Game ID: {self.game_db.game_number-1}\") await ctx.send(embed=embed) @commands.command() async def score(self,", "players # - [List] of display names of all players # - [List]", "be based on winner if content[2].upper() == \"I\": winner_str = \"`Impostors Win!`\" else:", "game found await self.__show_error(ctx, \"You are not in a game!\") return # Check", "Valid game winner content[0] != content[1] # Imps are not the same ]", "import commands from discord import Color, Embed from backend.game_database import GameDatabase import backend.commands", "# Send embed listing all ids as mentions players_str = \"\" number =", "= ctx.author voice_state = member.voice # Error if not in a VC if", "# React to the message with the check and X Emoji await pending_msg.add_reaction(\"\\U00002705\")", "crew_str = imp_str = \"\" crew_counter = imp_counter = 1 for i in", "check and X Emoji await pending_msg.add_reaction(\"\\U00002705\") await pending_msg.add_reaction(\"\\U0000274C\") # Give embed message id", "bot self.game_db = GameDatabase() # Print out error as embed async def __show_error(self,", "reaction.message.id # Check if message is a pending game if not msg_id in", "if self.required_x+1 == all_reactions[1].count: self.game_db.reverse_pending(game_id) return 
# Remove game from pending games self.game_db.remove_pending_game(game_id)", "we need to fill # 4 parameters: # - [List] of ID's of", "self.game_db.get_game(author_id) game_id = self.game_db.get_game_id(author_id) except: # Error if no game found await self.__show_error(ctx,", "the game if not user.id in player_ids and not user.bot: await reaction.message.remove_reaction(reaction.emoji, user)", "wait for 6 reactions embed = Embed( title=\"Pending Game Results!\", description=winner_str, color=Color.blue() )", "is met all_reactions = reaction.message.reactions if self.required_checks+1 == all_reactions[0].count \\ or self.required_x+1 ==", "already being scored if self.game_db.is_game_pending(game_id): await self.__show_error(ctx, \"Game is already being scored!\") return", "ID's of all players # - [List] of display names of all players", "not the same ] if not all(format_checks): await self.__show_error(ctx, \"Incorrect format!\") return #", "game with author try: author_id = ctx.author.id player_ids = self.game_db.get_game(author_id) game_id = self.game_db.get_game_id(author_id)", "= 5 # Not including bot self.game_db = GameDatabase() # Print out error", "# Get info from old embed old_embed = reaction.message.embeds[0] # Edit embed message", "message embed = Embed( title=title, color=color, description=old_embed.description ) for field in old_embed.fields: embed.add_field(", "are valid numbers content[1] in valid_nums, # Imps are valid numbers content[2].upper() in", "id in player_ids: player_names.append( reaction.message.guild.get_member(id).display_name ) # Add game to database player_ids =", "names of all players # - [List] of ID's of all impostors #", "all_reactions[1].count: self.game_db.reverse_pending(game_id) return # Remove game from pending games self.game_db.remove_pending_game(game_id) # To make", "the imp's\\n\\ place on the list.\" embed.add_field( name=\"When finished...\", value=scoring_help, inline=True ) embed.set_footer(text=f\"Game", "in player_ids] imp_ids = [str(i) for i in imp_ids[0]] db.add_game(player_ids, player_names, imp_ids, not", "list of all 10 ID's voice_channel = voice_state.channel member_ids = list(voice_channel.voice_states.keys()) # Error", "embed.set_footer(text=f\"{self.required_checks} reactions required.\") pending_msg = await ctx.send(embed=embed) # React to the message with", "number = 1 for id in member_ids: players_str += f\"{number}. <@{id}>\\n\" number +=", "self.required_reactions threshold is met all_reactions = reaction.message.reactions if self.required_checks+1 == all_reactions[0].count \\ or", "all players # - [List] of ID's of all impostors # - [Boolean]", "player who typed command member = ctx.author voice_state = member.voice # Error if", "ctx.message.content.split()[1:] # Error if format is incorrect if len(content) != 3: await self.__show_error(ctx,", "= bot self.player_count = 10 self.required_checks = 8 # Not including bot self.required_x", "description=winner_str, color=Color.blue() ) # crew_str = \"1. Dale\\n2. Peter\\n 3. 
Steve\" # crew_counter", "all_reactions[0].count \\ or self.required_x+1 == all_reactions[1].count: # Title variable is for embed title", "import backend.commands as db class Automated(commands.Cog): def __init__(self, bot): self.bot = bot self.player_count", "await self.__show_error(ctx, \"Game is already being scored!\") return # Description of embed will", "in player_ids: player_names.append( reaction.message.guild.get_member(id).display_name ) # Add game to database player_ids = [str(i)", "= 1 for i in range(len(player_ids)): if i+1 != int(content[0]) and i+1 !=", "of all players # - [List] of ID's of all impostors # -", "async def on_reaction_add(self, reaction, user): msg_id = reaction.message.id # Check if message is", "imp_ids, did_imps_win = self.game_db.imps[game_id] player_names = [] for id in player_ids: player_names.append( reaction.message.guild.get_member(id).display_name", "+= f\"{number}. <@{id}>\\n\" number += 1 embed = Embed( title=\"Game Setup\", color=Color.blue(), )", "reaction.message.edit(embed=embed) # Don't score game if it's canceled if self.required_x+1 == all_reactions[1].count: self.game_db.reverse_pending(game_id)", "= \"\" number = 1 for id in member_ids: players_str += f\"{number}. <@{id}>\\n\"", "if it's canceled if self.required_x+1 == all_reactions[1].count: self.game_db.reverse_pending(game_id) return # Remove game from", "embed old_embed = reaction.message.embeds[0] # Edit embed message embed = Embed( title=title, color=color,", "numbers content[1] in valid_nums, # Imps are valid numbers content[2].upper() in [\"I\", \"C\"],", "1 # imp_str = \"1. John\\n2. David\" # imp_counter = 1 crew_str =", "[List] of ID's of all players # - [List] of display names of", ") for field in old_embed.fields: embed.add_field( name=field.name, value=field.value, inline=True ) await reaction.message.edit(embed=embed) #", "inline=True ) embed.add_field( name=\"Impostors\", value=imp_str, inline=True ) embed.set_footer(text=f\"{self.required_checks} reactions required.\") pending_msg = await", "= \"There are not enough people in your voice channel!\" await self.__show_error(ctx, error_message)", "bot self.required_x = 5 # Not including bot self.game_db = GameDatabase() # Print", "game_id = self.game_db.pending[msg_id] player_ids = self.game_db.games[game_id] # Remove reaction if user isn't in", "embed = Embed( # title=\"Void\", # color=Color.dark_gray(), # description=desc # ) # #", "Imps are valid numbers content[1] in valid_nums, # Imps are valid numbers content[2].upper()", "\"There are not enough people in your voice channel!\" await self.__show_error(ctx, error_message) return", "channel!\" await self.__show_error(ctx, error_message) return # Store in database self.game_db.add_game(member_ids) # Send embed", "\"\" crew_counter = imp_counter = 1 for i in range(len(player_ids)): if i+1 !=", "out error as embed async def __show_error(self, ctx, error_message): embed = Embed( title=\"Error!\",", "from backend.game_database import GameDatabase import backend.commands as db class Automated(commands.Cog): def __init__(self, bot):", "player_ids = [str(i) for i in player_ids] imp_ids = [str(i) for i in" ]
[ "__name__ == \"__main__\": default_file = \"test\" fname = askString(\"Kernelcache symbol file\",\"Symbol file: \",default_file)", "\"__main__\": default_file = \"test\" fname = askString(\"Kernelcache symbol file\",\"Symbol file: \",default_file) f =", "jtool2 #@author simo #@category iOS.kernel from utils.methods import * if __name__ == \"__main__\":", "== \"__main__\": default_file = \"test\" fname = askString(\"Kernelcache symbol file\",\"Symbol file: \",default_file) f", "#@category iOS.kernel from utils.methods import * if __name__ == \"__main__\": default_file = \"test\"", "askString(\"Kernelcache symbol file\",\"Symbol file: \",default_file) f = open(fname,\"rb+\") buf = f.read().split('\\n') i =", "i = 0 for line in buf: if len(line) == 0: continue addr", "symbol file\",\"Symbol file: \",default_file) f = open(fname,\"rb+\") buf = f.read().split('\\n') i = 0", "= 0 for line in buf: if len(line) == 0: continue addr ,", ", symbol , empty = line.split(\"|\") if len(symbol) == 0: continue if \"func_\"", "\"func_\" in symbol: continue print addr,symbol symbol = symbol.strip()#.replace(\" \",\"_\") symbolicate(addr,symbol) i+= 1", "\"test\" fname = askString(\"Kernelcache symbol file\",\"Symbol file: \",default_file) f = open(fname,\"rb+\") buf =", "file: \",default_file) f = open(fname,\"rb+\") buf = f.read().split('\\n') i = 0 for line", "f.read().split('\\n') i = 0 for line in buf: if len(line) == 0: continue", "buf = f.read().split('\\n') i = 0 for line in buf: if len(line) ==", "= open(fname,\"rb+\") buf = f.read().split('\\n') i = 0 for line in buf: if", "= f.read().split('\\n') i = 0 for line in buf: if len(line) == 0:", "from utils.methods import * if __name__ == \"__main__\": default_file = \"test\" fname =", "\",default_file) f = open(fname,\"rb+\") buf = f.read().split('\\n') i = 0 for line in", "# Symbolicate the kernelcache from jtool2 #@author simo #@category iOS.kernel from utils.methods import", ", empty = line.split(\"|\") if len(symbol) == 0: continue if \"func_\" in symbol:", "if len(line) == 0: continue addr , symbol , empty = line.split(\"|\") if", "default_file = \"test\" fname = askString(\"Kernelcache symbol file\",\"Symbol file: \",default_file) f = open(fname,\"rb+\")", "import * if __name__ == \"__main__\": default_file = \"test\" fname = askString(\"Kernelcache symbol", "the kernelcache from jtool2 #@author simo #@category iOS.kernel from utils.methods import * if", "= askString(\"Kernelcache symbol file\",\"Symbol file: \",default_file) f = open(fname,\"rb+\") buf = f.read().split('\\n') i", "addr , symbol , empty = line.split(\"|\") if len(symbol) == 0: continue if", "* if __name__ == \"__main__\": default_file = \"test\" fname = askString(\"Kernelcache symbol file\",\"Symbol", "fname = askString(\"Kernelcache symbol file\",\"Symbol file: \",default_file) f = open(fname,\"rb+\") buf = f.read().split('\\n')", "== 0: continue if \"func_\" in symbol: continue print addr,symbol symbol = symbol.strip()#.replace(\"", "0: continue if \"func_\" in symbol: continue print addr,symbol symbol = symbol.strip()#.replace(\" \",\"_\")", "file\",\"Symbol file: \",default_file) f = open(fname,\"rb+\") buf = f.read().split('\\n') i = 0 for", "Symbolicate the kernelcache from jtool2 #@author simo #@category iOS.kernel from utils.methods import *", "if __name__ == \"__main__\": default_file = \"test\" fname = askString(\"Kernelcache symbol file\",\"Symbol file:", "0: continue addr , symbol , empty = line.split(\"|\") if len(symbol) == 0:", "kernelcache from jtool2 #@author 
simo #@category iOS.kernel from utils.methods import * if __name__", "buf: if len(line) == 0: continue addr , symbol , empty = line.split(\"|\")", "line.split(\"|\") if len(symbol) == 0: continue if \"func_\" in symbol: continue print addr,symbol", "continue addr , symbol , empty = line.split(\"|\") if len(symbol) == 0: continue", "symbol , empty = line.split(\"|\") if len(symbol) == 0: continue if \"func_\" in", "if len(symbol) == 0: continue if \"func_\" in symbol: continue print addr,symbol symbol", "len(symbol) == 0: continue if \"func_\" in symbol: continue print addr,symbol symbol =", "if \"func_\" in symbol: continue print addr,symbol symbol = symbol.strip()#.replace(\" \",\"_\") symbolicate(addr,symbol) i+=", "f = open(fname,\"rb+\") buf = f.read().split('\\n') i = 0 for line in buf:", "iOS.kernel from utils.methods import * if __name__ == \"__main__\": default_file = \"test\" fname", "continue if \"func_\" in symbol: continue print addr,symbol symbol = symbol.strip()#.replace(\" \",\"_\") symbolicate(addr,symbol)", "len(line) == 0: continue addr , symbol , empty = line.split(\"|\") if len(symbol)", "utils.methods import * if __name__ == \"__main__\": default_file = \"test\" fname = askString(\"Kernelcache", "in buf: if len(line) == 0: continue addr , symbol , empty =", "#@author simo #@category iOS.kernel from utils.methods import * if __name__ == \"__main__\": default_file", "open(fname,\"rb+\") buf = f.read().split('\\n') i = 0 for line in buf: if len(line)", "from jtool2 #@author simo #@category iOS.kernel from utils.methods import * if __name__ ==", "line in buf: if len(line) == 0: continue addr , symbol , empty", "= line.split(\"|\") if len(symbol) == 0: continue if \"func_\" in symbol: continue print", "empty = line.split(\"|\") if len(symbol) == 0: continue if \"func_\" in symbol: continue", "for line in buf: if len(line) == 0: continue addr , symbol ,", "simo #@category iOS.kernel from utils.methods import * if __name__ == \"__main__\": default_file =", "== 0: continue addr , symbol , empty = line.split(\"|\") if len(symbol) ==", "0 for line in buf: if len(line) == 0: continue addr , symbol", "= \"test\" fname = askString(\"Kernelcache symbol file\",\"Symbol file: \",default_file) f = open(fname,\"rb+\") buf" ]
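
# A sketch of the input format the loop above assumes: a pipe-delimited
# symbol dump with one "<address>|<symbol>|" entry per line (the trailing
# pipe is what makes the three-way unpack succeed). The addresses and names
# below are illustrative placeholders, not real jtool2 output:
#
#   0xFFFFFFF007A2C000|_kernel_bootstrap|
#   0xFFFFFFF007B10000|func_fffffff007b10000|    (skipped by the "func_" filter)
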
[ "writing, software # distributed under the License is distributed on an \"AS IS\"", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "some types randomly assert Type.BOOL.value == 0 assert Layout.FIXED_WIDTH.value == 1 assert Type.INT32", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "Type from pycylon.data.data_type import Layout def test_data_types_1(): # Here just check some types", "License. # You may obtain a copy of the License at # #", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "governing permissions and # limitations under the License. ## ''' Run test: >>", "import Layout def test_data_types_1(): # Here just check some types randomly assert Type.BOOL.value", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. # You may obtain a", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "python/test/test_data_types.py ''' from pycylon.data.data_type import Type from pycylon.data.data_type import Layout def test_data_types_1(): #", "## # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "import Type from pycylon.data.data_type import Layout def test_data_types_1(): # Here just check some", "pytest -q python/test/test_data_types.py ''' from pycylon.data.data_type import Type from pycylon.data.data_type import Layout def", "ANY KIND, either express or implied. # See the License for the specific", "Run test: >> pytest -q python/test/test_data_types.py ''' from pycylon.data.data_type import Type from pycylon.data.data_type", "from pycylon.data.data_type import Layout def test_data_types_1(): # Here just check some types randomly", "for the specific language governing permissions and # limitations under the License. ##", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "0 assert Layout.FIXED_WIDTH.value == 1 assert Type.INT32 == 6 assert Layout.FIXED_WIDTH == 1", "use this file except in compliance with the License. # You may obtain", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "language governing permissions and # limitations under the License. 
## ''' Run test:", "See the License for the specific language governing permissions and # limitations under", "from pycylon.data.data_type import Type from pycylon.data.data_type import Layout def test_data_types_1(): # Here just", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "specific language governing permissions and # limitations under the License. ## ''' Run", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "the specific language governing permissions and # limitations under the License. ## '''", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "pycylon.data.data_type import Layout def test_data_types_1(): # Here just check some types randomly assert", "assert Type.BOOL.value == 0 assert Layout.FIXED_WIDTH.value == 1 assert Type.INT32 == 6 assert", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "permissions and # limitations under the License. ## ''' Run test: >> pytest", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "OF ANY KIND, either express or implied. # See the License for the", "2.0 (the \"License\"); # you may not use this file except in compliance", "# you may not use this file except in compliance with the License.", "# Here just check some types randomly assert Type.BOOL.value == 0 assert Layout.FIXED_WIDTH.value", "agreed to in writing, software # distributed under the License is distributed on", "def test_data_types_1(): # Here just check some types randomly assert Type.BOOL.value == 0", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "and # limitations under the License. ## ''' Run test: >> pytest -q", "(the \"License\"); # you may not use this file except in compliance with", "test: >> pytest -q python/test/test_data_types.py ''' from pycylon.data.data_type import Type from pycylon.data.data_type import", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. # See the License for the specific language governing permissions", "pycylon.data.data_type import Type from pycylon.data.data_type import Layout def test_data_types_1(): # Here just check", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "except in compliance with the License. # You may obtain a copy of", "by applicable law or agreed to in writing, software # distributed under the", "License. ## ''' Run test: >> pytest -q python/test/test_data_types.py ''' from pycylon.data.data_type import", "''' Run test: >> pytest -q python/test/test_data_types.py ''' from pycylon.data.data_type import Type from", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "either express or implied. # See the License for the specific language governing", "-q python/test/test_data_types.py ''' from pycylon.data.data_type import Type from pycylon.data.data_type import Layout def test_data_types_1():", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "file except in compliance with the License. # You may obtain a copy", "the License. ## ''' Run test: >> pytest -q python/test/test_data_types.py ''' from pycylon.data.data_type", "check some types randomly assert Type.BOOL.value == 0 assert Layout.FIXED_WIDTH.value == 1 assert", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "types randomly assert Type.BOOL.value == 0 assert Layout.FIXED_WIDTH.value == 1 assert Type.INT32 ==", "''' from pycylon.data.data_type import Type from pycylon.data.data_type import Layout def test_data_types_1(): # Here", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "test_data_types_1(): # Here just check some types randomly assert Type.BOOL.value == 0 assert", "the License. # You may obtain a copy of the License at #", "# limitations under the License. ## ''' Run test: >> pytest -q python/test/test_data_types.py", "to in writing, software # distributed under the License is distributed on an", "Type.BOOL.value == 0 assert Layout.FIXED_WIDTH.value == 1 assert Type.INT32 == 6 assert Layout.FIXED_WIDTH", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "## ''' Run test: >> pytest -q python/test/test_data_types.py ''' from pycylon.data.data_type import Type", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "limitations under the License. ## ''' Run test: >> pytest -q python/test/test_data_types.py '''", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "randomly assert Type.BOOL.value == 0 assert Layout.FIXED_WIDTH.value == 1 assert Type.INT32 == 6", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "applicable law or agreed to in writing, software # distributed under the License", "under the License. ## ''' Run test: >> pytest -q python/test/test_data_types.py ''' from", "Layout def test_data_types_1(): # Here just check some types randomly assert Type.BOOL.value ==", "== 0 assert Layout.FIXED_WIDTH.value == 1 assert Type.INT32 == 6 assert Layout.FIXED_WIDTH ==", "or agreed to in writing, software # distributed under the License is distributed", "or implied. # See the License for the specific language governing permissions and", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", ">> pytest -q python/test/test_data_types.py ''' from pycylon.data.data_type import Type from pycylon.data.data_type import Layout", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "with the License. # You may obtain a copy of the License at", "just check some types randomly assert Type.BOOL.value == 0 assert Layout.FIXED_WIDTH.value == 1", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "Here just check some types randomly assert Type.BOOL.value == 0 assert Layout.FIXED_WIDTH.value ==", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
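# Hedged companion check: the asserts above mix member/value comparisons
# (Type.INT32 == 6 next to Type.BOOL.value == 0), which only both hold if
# Type and Layout behave like IntEnum members; that is an assumption about
# pycylon internals, not something confirmed here:
def test_data_types_2():
    assert Type.BOOL == Type.BOOL.value
    assert Layout.FIXED_WIDTH == Layout.FIXED_WIDTH.value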
[ "db.query(db.Sources).\\ filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas() # Get the BDNYC source_id values for our SIMPLE sources", "foreign_key='source_id', column_type_overrides={'spectra.spectrum': types.TEXT(), 'spectra.local_spectrum': types.TEXT()}) # SIMPLE connection_string = 'sqlite:///SIMPLE.db' db = Database(connection_string)", "# Reload from directory, if needed db.load_database('data', verbose=False) # -------------------------------------------------------------------------------------- # For each", "in the band list that has version flags and publications for source, bdnyc_id", "the BDNYC schema work properly connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc = Database(connection_string, reference_tables=['changelog', 'data_requests',", "the BDNYC database into SIMPLE from astrodbkit2.astrodb import Database, and_ from sqlalchemy import", "if len(bd_source) != 1: print(f\"ERROR matching {row['source']}\") else: source_dict[row['source']] = int(bd_source['id'].values[0]) # Grab", "source_dict.items(): print(f'{source} : {bdnyc_id}') bd_data = bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id == bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version <=", "bdnyc.photometry.c.band.in_(band_list))).\\ pandas() if len(bd_data) == 0: continue # Insert into the database new_data", "old_data = db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas() if len(old_data) > 0: if (row['band'], row['publication_shortname']) in", "connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc = Database(connection_string, reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes', 'systems', 'telescopes',", "zip(old_data['band'].tolist(), old_data['reference'].tolist()): if verbose: print(f\"{source}: {row['band']} already in database for reference {row['publication_shortname']}\") new_data", "verbose: print(f\"{source}: {row['band']} already in database for reference {row['publication_shortname']}\") new_data = None continue", "for BDNYC column overrides verbose = True # -------------------------------------------------------------------------------------- # Establish connection to", "Establish connection to databases # Note that special parameters have to be passed", "bdnyc.photometry.c.version <= 2, bdnyc.photometry.c.band.in_(band_list))).\\ pandas() if len(bd_data) == 0: continue # Insert into", "import Database, and_ from sqlalchemy import types # for BDNYC column overrides verbose", "BDNYC column overrides verbose = True # -------------------------------------------------------------------------------------- # Establish connection to databases", "sources that already have photometry in these bands temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() sources_with_photometry =", "row['magnitude'], 'magnitude_error': row['magnitude_unc'], 'telescope': 'WISE', 'reference': row['publication_shortname'], 'epoch': row['epoch'], 'comments': row['comments']} new_data.append(datum) if", "only photometry in the band list that has version flags and publications for", "include sources that already have photometry in these bands temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() sources_with_photometry", "sources_with_photometry = [s[0] for s in temp] sources = db.query(db.Sources).\\ filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas() #", "1: 
print(f\"ERROR matching {row['source']}\") else: source_dict[row['source']] = int(bd_source['id'].values[0]) # Grab only photometry in", "Grab only photometry in the band list that has version flags and publications", "SIMPLE sources source_dict = {} for i, row in sources.iterrows(): bd_source = bdnyc.search_object(row['source'],", "source in SIMPLE, search in BDNYC and grab specified photometry # Will be", "in BDNYC and grab specified photometry # Will be only grabbing WISE data", "# -------------------------------------------------------------------------------------- # Establish connection to databases # Note that special parameters have", "each source in SIMPLE, search in BDNYC and grab specified photometry # Will", "Database, and_ from sqlalchemy import types # for BDNYC column overrides verbose =", "'WISE' band_list = ['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4'] # Don't include sources that already", "# Will be only grabbing WISE data for now telescope = 'WISE' band_list", "== source).pandas() if len(old_data) > 0: if (row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(), old_data['reference'].tolist()): if", "source_id values for our SIMPLE sources source_dict = {} for i, row in", "{bdnyc_id}') bd_data = bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id == bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version <= 2, bdnyc.photometry.c.band.in_(band_list))).\\ pandas()", "the database new_data = [] for i, row in bd_data.iterrows(): old_data = db.query(db.Photometry).filter(db.Photometry.c.source", "in zip(old_data['band'].tolist(), old_data['reference'].tolist()): if verbose: print(f\"{source}: {row['band']} already in database for reference {row['publication_shortname']}\")", "is not None: print(f\"{source} : Ingesting new data: {new_data}\") db.Photometry.insert().execute(new_data) # -------------------------------------------------------------------------------------- #", "that has version flags and publications for source, bdnyc_id in source_dict.items(): print(f'{source} :", "now telescope = 'WISE' band_list = ['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4'] # Don't include", "for now telescope = 'WISE' band_list = ['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4'] # Don't", "WISE data for now telescope = 'WISE' band_list = ['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4']", "[s[0] for s in temp] sources = db.query(db.Sources).\\ filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas() # Get the", "row in sources.iterrows(): bd_source = bdnyc.search_object(row['source'], output_table='sources', table_names={'sources': ['designation', 'names']}, fmt='pandas') if len(bd_source)", "the band list that has version flags and publications for source, bdnyc_id in", "work properly connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc = Database(connection_string, reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes',", "grabbing WISE data for now telescope = 'WISE' band_list = ['WISE_W1', 'WISE_W2', 'WISE_W3',", "in these bands temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() sources_with_photometry = [s[0] for s in temp]", "not None: print(f\"{source} : Ingesting new data: {new_data}\") db.Photometry.insert().execute(new_data) # -------------------------------------------------------------------------------------- # Output", "BDNYC source_id values for our SIMPLE sources source_dict = {} for i, row", "if 
(row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(), old_data['reference'].tolist()): if verbose: print(f\"{source}: {row['band']} already in database", "verbose = True # -------------------------------------------------------------------------------------- # Establish connection to databases # Note that", "SIMPLE connection_string = 'sqlite:///SIMPLE.db' db = Database(connection_string) # -------------------------------------------------------------------------------------- # Reload from directory,", "in sources.iterrows(): bd_source = bdnyc.search_object(row['source'], output_table='sources', table_names={'sources': ['designation', 'names']}, fmt='pandas') if len(bd_source) !=", "schema work properly connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc = Database(connection_string, reference_tables=['changelog', 'data_requests', 'publications', 'ignore',", "= 'WISE' band_list = ['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4'] # Don't include sources that", "2, bdnyc.photometry.c.band.in_(band_list))).\\ pandas() if len(bd_data) == 0: continue # Insert into the database", "database for reference {row['publication_shortname']}\") new_data = None continue datum = {'source': source, 'band':", "= bdnyc.search_object(row['source'], output_table='sources', table_names={'sources': ['designation', 'names']}, fmt='pandas') if len(bd_source) != 1: print(f\"ERROR matching", "# SIMPLE connection_string = 'sqlite:///SIMPLE.db' db = Database(connection_string) # -------------------------------------------------------------------------------------- # Reload from", "directory, if needed db.load_database('data', verbose=False) # -------------------------------------------------------------------------------------- # For each source in SIMPLE,", "passed to allow the BDNYC schema work properly connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc =", "'versions', 'instruments'], primary_table='sources', primary_table_key='id', foreign_key='source_id', column_type_overrides={'spectra.spectrum': types.TEXT(), 'spectra.local_spectrum': types.TEXT()}) # SIMPLE connection_string =", "Reload from directory, if needed db.load_database('data', verbose=False) # -------------------------------------------------------------------------------------- # For each source", "-------------------------------------------------------------------------------------- # Reload from directory, if needed db.load_database('data', verbose=False) # -------------------------------------------------------------------------------------- # For", "db.load_database('data', verbose=False) # -------------------------------------------------------------------------------------- # For each source in SIMPLE, search in BDNYC", "connection to databases # Note that special parameters have to be passed to", "== 0: continue # Insert into the database new_data = [] for i,", "'publications', 'ignore', 'modes', 'systems', 'telescopes', 'versions', 'instruments'], primary_table='sources', primary_table_key='id', foreign_key='source_id', column_type_overrides={'spectra.spectrum': types.TEXT(), 'spectra.local_spectrum':", "to allow the BDNYC schema work properly connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc = Database(connection_string,", "# Grab only photometry in the band list that has version flags and", "# Note that special parameters have to be passed to allow the BDNYC", "old_data['reference'].tolist()): if verbose: print(f\"{source}: {row['band']} already in database for reference 
{row['publication_shortname']}\") new_data =", "row['publication_shortname'], 'epoch': row['epoch'], 'comments': row['comments']} new_data.append(datum) if new_data is not None: print(f\"{source} :", "-------------------------------------------------------------------------------------- # For each source in SIMPLE, search in BDNYC and grab specified", "= bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id == bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version <= 2, bdnyc.photometry.c.band.in_(band_list))).\\ pandas() if len(bd_data)", "for i, row in bd_data.iterrows(): old_data = db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas() if len(old_data) >", "= True # -------------------------------------------------------------------------------------- # Establish connection to databases # Note that special", "allow the BDNYC schema work properly connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc = Database(connection_string, reference_tables=['changelog',", "'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc = Database(connection_string, reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes', 'systems', 'telescopes', 'versions', 'instruments'],", "= 'sqlite:///SIMPLE.db' db = Database(connection_string) # -------------------------------------------------------------------------------------- # Reload from directory, if needed", "= ['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4'] # Don't include sources that already have photometry", "new_data = None continue datum = {'source': source, 'band': row['band'], 'magnitude': row['magnitude'], 'magnitude_error':", "types # for BDNYC column overrides verbose = True # -------------------------------------------------------------------------------------- # Establish", "sqlalchemy import types # for BDNYC column overrides verbose = True # --------------------------------------------------------------------------------------", "needed db.load_database('data', verbose=False) # -------------------------------------------------------------------------------------- # For each source in SIMPLE, search in", "sources = db.query(db.Sources).\\ filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas() # Get the BDNYC source_id values for our", "== bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version <= 2, bdnyc.photometry.c.band.in_(band_list))).\\ pandas() if len(bd_data) == 0: continue", "0: continue # Insert into the database new_data = [] for i, row", "telescope = 'WISE' band_list = ['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4'] # Don't include sources", "print(f\"{source}: {row['band']} already in database for reference {row['publication_shortname']}\") new_data = None continue datum", "for source, bdnyc_id in source_dict.items(): print(f'{source} : {bdnyc_id}') bd_data = bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id ==", "> 0: if (row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(), old_data['reference'].tolist()): if verbose: print(f\"{source}: {row['band']} already", "'modes', 'systems', 'telescopes', 'versions', 'instruments'], primary_table='sources', primary_table_key='id', foreign_key='source_id', column_type_overrides={'spectra.spectrum': types.TEXT(), 'spectra.local_spectrum': types.TEXT()}) #", "that already have photometry in these bands temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() 
sources_with_photometry = [s[0]", "'systems', 'telescopes', 'versions', 'instruments'], primary_table='sources', primary_table_key='id', foreign_key='source_id', column_type_overrides={'spectra.spectrum': types.TEXT(), 'spectra.local_spectrum': types.TEXT()}) # SIMPLE", "filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas() # Get the BDNYC source_id values for our SIMPLE sources source_dict", "source).pandas() if len(old_data) > 0: if (row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(), old_data['reference'].tolist()): if verbose:", "Script to add photometry from the BDNYC database into SIMPLE from astrodbkit2.astrodb import", "our SIMPLE sources source_dict = {} for i, row in sources.iterrows(): bd_source =", "len(bd_source) != 1: print(f\"ERROR matching {row['source']}\") else: source_dict[row['source']] = int(bd_source['id'].values[0]) # Grab only", "'ignore', 'modes', 'systems', 'telescopes', 'versions', 'instruments'], primary_table='sources', primary_table_key='id', foreign_key='source_id', column_type_overrides={'spectra.spectrum': types.TEXT(), 'spectra.local_spectrum': types.TEXT()})", "Will be only grabbing WISE data for now telescope = 'WISE' band_list =", "BDNYC database into SIMPLE from astrodbkit2.astrodb import Database, and_ from sqlalchemy import types", "column_type_overrides={'spectra.spectrum': types.TEXT(), 'spectra.local_spectrum': types.TEXT()}) # SIMPLE connection_string = 'sqlite:///SIMPLE.db' db = Database(connection_string) #", "= {} for i, row in sources.iterrows(): bd_source = bdnyc.search_object(row['source'], output_table='sources', table_names={'sources': ['designation',", "# for BDNYC column overrides verbose = True # -------------------------------------------------------------------------------------- # Establish connection", "'telescopes', 'versions', 'instruments'], primary_table='sources', primary_table_key='id', foreign_key='source_id', column_type_overrides={'spectra.spectrum': types.TEXT(), 'spectra.local_spectrum': types.TEXT()}) # SIMPLE connection_string", "types.TEXT()}) # SIMPLE connection_string = 'sqlite:///SIMPLE.db' db = Database(connection_string) # -------------------------------------------------------------------------------------- # Reload", "search in BDNYC and grab specified photometry # Will be only grabbing WISE", "to databases # Note that special parameters have to be passed to allow", "len(bd_data) == 0: continue # Insert into the database new_data = [] for", "bd_data.iterrows(): old_data = db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas() if len(old_data) > 0: if (row['band'], row['publication_shortname'])", "from directory, if needed db.load_database('data', verbose=False) # -------------------------------------------------------------------------------------- # For each source in", "'magnitude_error': row['magnitude_unc'], 'telescope': 'WISE', 'reference': row['publication_shortname'], 'epoch': row['epoch'], 'comments': row['comments']} new_data.append(datum) if new_data", "row['publication_shortname']) in zip(old_data['band'].tolist(), old_data['reference'].tolist()): if verbose: print(f\"{source}: {row['band']} already in database for reference", "continue datum = {'source': source, 'band': row['band'], 'magnitude': row['magnitude'], 'magnitude_error': row['magnitude_unc'], 'telescope': 'WISE',", "types.TEXT(), 'spectra.local_spectrum': types.TEXT()}) # SIMPLE connection_string = 'sqlite:///SIMPLE.db' db = Database(connection_string) # 
--------------------------------------------------------------------------------------", "'WISE', 'reference': row['publication_shortname'], 'epoch': row['epoch'], 'comments': row['comments']} new_data.append(datum) if new_data is not None:", "bands temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() sources_with_photometry = [s[0] for s in temp] sources =", "= None continue datum = {'source': source, 'band': row['band'], 'magnitude': row['magnitude'], 'magnitude_error': row['magnitude_unc'],", "into SIMPLE from astrodbkit2.astrodb import Database, and_ from sqlalchemy import types # for", "pandas() if len(bd_data) == 0: continue # Insert into the database new_data =", "None: print(f\"{source} : Ingesting new data: {new_data}\") db.Photometry.insert().execute(new_data) # -------------------------------------------------------------------------------------- # Output changes", "datum = {'source': source, 'band': row['band'], 'magnitude': row['magnitude'], 'magnitude_error': row['magnitude_unc'], 'telescope': 'WISE', 'reference':", "already have photometry in these bands temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() sources_with_photometry = [s[0] for", "bd_source = bdnyc.search_object(row['source'], output_table='sources', table_names={'sources': ['designation', 'names']}, fmt='pandas') if len(bd_source) != 1: print(f\"ERROR", "fmt='pandas') if len(bd_source) != 1: print(f\"ERROR matching {row['source']}\") else: source_dict[row['source']] = int(bd_source['id'].values[0]) #", "version flags and publications for source, bdnyc_id in source_dict.items(): print(f'{source} : {bdnyc_id}') bd_data", "to be passed to allow the BDNYC schema work properly connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db'", "= {'source': source, 'band': row['band'], 'magnitude': row['magnitude'], 'magnitude_error': row['magnitude_unc'], 'telescope': 'WISE', 'reference': row['publication_shortname'],", "be only grabbing WISE data for now telescope = 'WISE' band_list = ['WISE_W1',", "grab specified photometry # Will be only grabbing WISE data for now telescope", "else: source_dict[row['source']] = int(bd_source['id'].values[0]) # Grab only photometry in the band list that", "'spectra.local_spectrum': types.TEXT()}) # SIMPLE connection_string = 'sqlite:///SIMPLE.db' db = Database(connection_string) # -------------------------------------------------------------------------------------- #", "= int(bd_source['id'].values[0]) # Grab only photometry in the band list that has version", "0: if (row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(), old_data['reference'].tolist()): if verbose: print(f\"{source}: {row['band']} already in", "'WISE_W4'] # Don't include sources that already have photometry in these bands temp", "'epoch': row['epoch'], 'comments': row['comments']} new_data.append(datum) if new_data is not None: print(f\"{source} : Ingesting", "= db.query(db.Sources).\\ filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas() # Get the BDNYC source_id values for our SIMPLE", "output_table='sources', table_names={'sources': ['designation', 'names']}, fmt='pandas') if len(bd_source) != 1: print(f\"ERROR matching {row['source']}\") else:", "'reference': row['publication_shortname'], 'epoch': row['epoch'], 'comments': row['comments']} new_data.append(datum) if new_data is not None: print(f\"{source}", "databases # Note that special parameters have to be passed to allow the", 
"properly connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc = Database(connection_string, reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes', 'systems',", "for our SIMPLE sources source_dict = {} for i, row in sources.iterrows(): bd_source", "'WISE_W2', 'WISE_W3', 'WISE_W4'] # Don't include sources that already have photometry in these", "to add photometry from the BDNYC database into SIMPLE from astrodbkit2.astrodb import Database,", "from sqlalchemy import types # for BDNYC column overrides verbose = True #", "{} for i, row in sources.iterrows(): bd_source = bdnyc.search_object(row['source'], output_table='sources', table_names={'sources': ['designation', 'names']},", "new_data = [] for i, row in bd_data.iterrows(): old_data = db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas()", "db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() sources_with_photometry = [s[0] for s in temp] sources = db.query(db.Sources).\\ filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas()", "'band': row['band'], 'magnitude': row['magnitude'], 'magnitude_error': row['magnitude_unc'], 'telescope': 'WISE', 'reference': row['publication_shortname'], 'epoch': row['epoch'], 'comments':", "connection_string = 'sqlite:///SIMPLE.db' db = Database(connection_string) # -------------------------------------------------------------------------------------- # Reload from directory, if", ": {bdnyc_id}') bd_data = bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id == bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version <= 2, bdnyc.photometry.c.band.in_(band_list))).\\", "SIMPLE from astrodbkit2.astrodb import Database, and_ from sqlalchemy import types # for BDNYC", "bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version <= 2, bdnyc.photometry.c.band.in_(band_list))).\\ pandas() if len(bd_data) == 0: continue # Insert", "bdnyc = Database(connection_string, reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes', 'systems', 'telescopes', 'versions', 'instruments'], primary_table='sources',", "'sqlite:///SIMPLE.db' db = Database(connection_string) # -------------------------------------------------------------------------------------- # Reload from directory, if needed db.load_database('data',", "Database(connection_string, reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes', 'systems', 'telescopes', 'versions', 'instruments'], primary_table='sources', primary_table_key='id', foreign_key='source_id',", "['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4'] # Don't include sources that already have photometry in", "band list that has version flags and publications for source, bdnyc_id in source_dict.items():", "# Don't include sources that already have photometry in these bands temp =", "# Get the BDNYC source_id values for our SIMPLE sources source_dict = {}", "flags and publications for source, bdnyc_id in source_dict.items(): print(f'{source} : {bdnyc_id}') bd_data =", "already in database for reference {row['publication_shortname']}\") new_data = None continue datum = {'source':", "'comments': row['comments']} new_data.append(datum) if new_data is not None: print(f\"{source} : Ingesting new data:", "source, 'band': row['band'], 'magnitude': row['magnitude'], 'magnitude_error': row['magnitude_unc'], 'telescope': 'WISE', 'reference': row['publication_shortname'], 'epoch': 
row['epoch'],", "only grabbing WISE data for now telescope = 'WISE' band_list = ['WISE_W1', 'WISE_W2',", "data for now telescope = 'WISE' band_list = ['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4'] #", "photometry in the band list that has version flags and publications for source,", "list that has version flags and publications for source, bdnyc_id in source_dict.items(): print(f'{source}", "[] for i, row in bd_data.iterrows(): old_data = db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas() if len(old_data)", "the BDNYC source_id values for our SIMPLE sources source_dict = {} for i,", "Ingesting new data: {new_data}\") db.Photometry.insert().execute(new_data) # -------------------------------------------------------------------------------------- # Output changes to directory db.save_database('data')", "# Insert into the database new_data = [] for i, row in bd_data.iterrows():", "if needed db.load_database('data', verbose=False) # -------------------------------------------------------------------------------------- # For each source in SIMPLE, search", "for i, row in sources.iterrows(): bd_source = bdnyc.search_object(row['source'], output_table='sources', table_names={'sources': ['designation', 'names']}, fmt='pandas')", "pandas() # Get the BDNYC source_id values for our SIMPLE sources source_dict =", "(row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(), old_data['reference'].tolist()): if verbose: print(f\"{source}: {row['band']} already in database for", "{row['publication_shortname']}\") new_data = None continue datum = {'source': source, 'band': row['band'], 'magnitude': row['magnitude'],", "Note that special parameters have to be passed to allow the BDNYC schema", "print(f\"ERROR matching {row['source']}\") else: source_dict[row['source']] = int(bd_source['id'].values[0]) # Grab only photometry in the", "and publications for source, bdnyc_id in source_dict.items(): print(f'{source} : {bdnyc_id}') bd_data = bdnyc.query(bdnyc.photometry).\\", "'telescope': 'WISE', 'reference': row['publication_shortname'], 'epoch': row['epoch'], 'comments': row['comments']} new_data.append(datum) if new_data is not", "database into SIMPLE from astrodbkit2.astrodb import Database, and_ from sqlalchemy import types #", "primary_table_key='id', foreign_key='source_id', column_type_overrides={'spectra.spectrum': types.TEXT(), 'spectra.local_spectrum': types.TEXT()}) # SIMPLE connection_string = 'sqlite:///SIMPLE.db' db =", "bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version <= 2, bdnyc.photometry.c.band.in_(band_list))).\\ pandas() if len(bd_data) == 0: continue #", "bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id == bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version <= 2, bdnyc.photometry.c.band.in_(band_list))).\\ pandas() if len(bd_data) ==", "reference {row['publication_shortname']}\") new_data = None continue datum = {'source': source, 'band': row['band'], 'magnitude':", "verbose=False) # -------------------------------------------------------------------------------------- # For each source in SIMPLE, search in BDNYC and", "source_dict = {} for i, row in sources.iterrows(): bd_source = bdnyc.search_object(row['source'], output_table='sources', table_names={'sources':", "# For each source in SIMPLE, search in BDNYC and grab specified photometry", "<= 2, bdnyc.photometry.c.band.in_(band_list))).\\ pandas() if len(bd_data) == 0: continue # Insert into 
the", "these bands temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() sources_with_photometry = [s[0] for s in temp] sources", "Insert into the database new_data = [] for i, row in bd_data.iterrows(): old_data", "be passed to allow the BDNYC schema work properly connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc", "column overrides verbose = True # -------------------------------------------------------------------------------------- # Establish connection to databases #", "For each source in SIMPLE, search in BDNYC and grab specified photometry #", "in SIMPLE, search in BDNYC and grab specified photometry # Will be only", "i, row in bd_data.iterrows(): old_data = db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas() if len(old_data) > 0:", "photometry in these bands temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() sources_with_photometry = [s[0] for s in", "if verbose: print(f\"{source}: {row['band']} already in database for reference {row['publication_shortname']}\") new_data = None", "photometry from the BDNYC database into SIMPLE from astrodbkit2.astrodb import Database, and_ from", "-------------------------------------------------------------------------------------- # Establish connection to databases # Note that special parameters have to", "['designation', 'names']}, fmt='pandas') if len(bd_source) != 1: print(f\"ERROR matching {row['source']}\") else: source_dict[row['source']] =", "None continue datum = {'source': source, 'band': row['band'], 'magnitude': row['magnitude'], 'magnitude_error': row['magnitude_unc'], 'telescope':", "= db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas() if len(old_data) > 0: if (row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(),", "import types # for BDNYC column overrides verbose = True # -------------------------------------------------------------------------------------- #", "new_data.append(datum) if new_data is not None: print(f\"{source} : Ingesting new data: {new_data}\") db.Photometry.insert().execute(new_data)", "# Establish connection to databases # Note that special parameters have to be", "BDNYC schema work properly connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc = Database(connection_string, reference_tables=['changelog', 'data_requests', 'publications',", "= 'sqlite:///../BDNYCdevdb/bdnycdev.db' bdnyc = Database(connection_string, reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes', 'systems', 'telescopes', 'versions',", "# -------------------------------------------------------------------------------------- # Reload from directory, if needed db.load_database('data', verbose=False) # -------------------------------------------------------------------------------------- #", "db = Database(connection_string) # -------------------------------------------------------------------------------------- # Reload from directory, if needed db.load_database('data', verbose=False)", "sources source_dict = {} for i, row in sources.iterrows(): bd_source = bdnyc.search_object(row['source'], output_table='sources',", "int(bd_source['id'].values[0]) # Grab only photometry in the band list that has version flags", "row['band'], 'magnitude': row['magnitude'], 'magnitude_error': row['magnitude_unc'], 'telescope': 'WISE', 'reference': row['publication_shortname'], 'epoch': row['epoch'], 'comments': row['comments']}", "!= 1: 
print(f\"ERROR matching {row['source']}\") else: source_dict[row['source']] = int(bd_source['id'].values[0]) # Grab only photometry", "BDNYC and grab specified photometry # Will be only grabbing WISE data for", "from the BDNYC database into SIMPLE from astrodbkit2.astrodb import Database, and_ from sqlalchemy", "Don't include sources that already have photometry in these bands temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all()", "row['epoch'], 'comments': row['comments']} new_data.append(datum) if new_data is not None: print(f\"{source} : Ingesting new", "print(f\"{source} : Ingesting new data: {new_data}\") db.Photometry.insert().execute(new_data) # -------------------------------------------------------------------------------------- # Output changes to", "continue # Insert into the database new_data = [] for i, row in", "= Database(connection_string, reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes', 'systems', 'telescopes', 'versions', 'instruments'], primary_table='sources', primary_table_key='id',", "s in temp] sources = db.query(db.Sources).\\ filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas() # Get the BDNYC source_id", "len(old_data) > 0: if (row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(), old_data['reference'].tolist()): if verbose: print(f\"{source}: {row['band']}", "values for our SIMPLE sources source_dict = {} for i, row in sources.iterrows():", "db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas() if len(old_data) > 0: if (row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(), old_data['reference'].tolist()):", "{'source': source, 'band': row['band'], 'magnitude': row['magnitude'], 'magnitude_error': row['magnitude_unc'], 'telescope': 'WISE', 'reference': row['publication_shortname'], 'epoch':", "reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes', 'systems', 'telescopes', 'versions', 'instruments'], primary_table='sources', primary_table_key='id', foreign_key='source_id', column_type_overrides={'spectra.spectrum':", "in bd_data.iterrows(): old_data = db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas() if len(old_data) > 0: if (row['band'],", "'data_requests', 'publications', 'ignore', 'modes', 'systems', 'telescopes', 'versions', 'instruments'], primary_table='sources', primary_table_key='id', foreign_key='source_id', column_type_overrides={'spectra.spectrum': types.TEXT(),", "database new_data = [] for i, row in bd_data.iterrows(): old_data = db.query(db.Photometry).filter(db.Photometry.c.source ==", "overrides verbose = True # -------------------------------------------------------------------------------------- # Establish connection to databases # Note", "have photometry in these bands temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() sources_with_photometry = [s[0] for s", "row['magnitude_unc'], 'telescope': 'WISE', 'reference': row['publication_shortname'], 'epoch': row['epoch'], 'comments': row['comments']} new_data.append(datum) if new_data is", "parameters have to be passed to allow the BDNYC schema work properly connection_string", "'instruments'], primary_table='sources', primary_table_key='id', foreign_key='source_id', column_type_overrides={'spectra.spectrum': types.TEXT(), 'spectra.local_spectrum': types.TEXT()}) # SIMPLE connection_string = 'sqlite:///SIMPLE.db'", "'WISE_W3', 'WISE_W4'] 
# Don't include sources that already have photometry in these bands", "sources.iterrows(): bd_source = bdnyc.search_object(row['source'], output_table='sources', table_names={'sources': ['designation', 'names']}, fmt='pandas') if len(bd_source) != 1:", "print(f'{source} : {bdnyc_id}') bd_data = bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id == bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version <= 2,", "from astrodbkit2.astrodb import Database, and_ from sqlalchemy import types # for BDNYC column", "for s in temp] sources = db.query(db.Sources).\\ filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas() # Get the BDNYC", "have to be passed to allow the BDNYC schema work properly connection_string =", "new_data is not None: print(f\"{source} : Ingesting new data: {new_data}\") db.Photometry.insert().execute(new_data) # --------------------------------------------------------------------------------------", "specified photometry # Will be only grabbing WISE data for now telescope =", "if new_data is not None: print(f\"{source} : Ingesting new data: {new_data}\") db.Photometry.insert().execute(new_data) #", "photometry # Will be only grabbing WISE data for now telescope = 'WISE'", "bdnyc_id in source_dict.items(): print(f'{source} : {bdnyc_id}') bd_data = bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id == bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None),", "temp] sources = db.query(db.Sources).\\ filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas() # Get the BDNYC source_id values for", "= [] for i, row in bd_data.iterrows(): old_data = db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas() if", "Get the BDNYC source_id values for our SIMPLE sources source_dict = {} for", "# Script to add photometry from the BDNYC database into SIMPLE from astrodbkit2.astrodb", ": Ingesting new data: {new_data}\") db.Photometry.insert().execute(new_data) # -------------------------------------------------------------------------------------- # Output changes to directory", "astrodbkit2.astrodb import Database, and_ from sqlalchemy import types # for BDNYC column overrides", "Database(connection_string) # -------------------------------------------------------------------------------------- # Reload from directory, if needed db.load_database('data', verbose=False) # --------------------------------------------------------------------------------------", "{row['source']}\") else: source_dict[row['source']] = int(bd_source['id'].values[0]) # Grab only photometry in the band list", "table_names={'sources': ['designation', 'names']}, fmt='pandas') if len(bd_source) != 1: print(f\"ERROR matching {row['source']}\") else: source_dict[row['source']]", "into the database new_data = [] for i, row in bd_data.iterrows(): old_data =", "has version flags and publications for source, bdnyc_id in source_dict.items(): print(f'{source} : {bdnyc_id}')", "special parameters have to be passed to allow the BDNYC schema work properly", "= db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() sources_with_photometry = [s[0] for s in temp] sources = db.query(db.Sources).\\ filter(db.Sources.c.source.notin_(sources_with_photometry)).\\", "in temp] sources = db.query(db.Sources).\\ filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas() # Get the BDNYC source_id values", "True # 
-------------------------------------------------------------------------------------- # Establish connection to databases # Note that special parameters", "if len(bd_data) == 0: continue # Insert into the database new_data = []", "and grab specified photometry # Will be only grabbing WISE data for now", "'names']}, fmt='pandas') if len(bd_source) != 1: print(f\"ERROR matching {row['source']}\") else: source_dict[row['source']] = int(bd_source['id'].values[0])", "primary_table='sources', primary_table_key='id', foreign_key='source_id', column_type_overrides={'spectra.spectrum': types.TEXT(), 'spectra.local_spectrum': types.TEXT()}) # SIMPLE connection_string = 'sqlite:///SIMPLE.db' db", "i, row in sources.iterrows(): bd_source = bdnyc.search_object(row['source'], output_table='sources', table_names={'sources': ['designation', 'names']}, fmt='pandas') if", "matching {row['source']}\") else: source_dict[row['source']] = int(bd_source['id'].values[0]) # Grab only photometry in the band", "row['comments']} new_data.append(datum) if new_data is not None: print(f\"{source} : Ingesting new data: {new_data}\")", "band_list = ['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4'] # Don't include sources that already have", "temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all() sources_with_photometry = [s[0] for s in temp] sources = db.query(db.Sources).\\", "{row['band']} already in database for reference {row['publication_shortname']}\") new_data = None continue datum =", "row in bd_data.iterrows(): old_data = db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas() if len(old_data) > 0: if", "# -------------------------------------------------------------------------------------- # For each source in SIMPLE, search in BDNYC and grab", "in database for reference {row['publication_shortname']}\") new_data = None continue datum = {'source': source,", "= Database(connection_string) # -------------------------------------------------------------------------------------- # Reload from directory, if needed db.load_database('data', verbose=False) #", "bd_data = bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id == bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version <= 2, bdnyc.photometry.c.band.in_(band_list))).\\ pandas() if", "in source_dict.items(): print(f'{source} : {bdnyc_id}') bd_data = bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id == bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version", "publications for source, bdnyc_id in source_dict.items(): print(f'{source} : {bdnyc_id}') bd_data = bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id", "SIMPLE, search in BDNYC and grab specified photometry # Will be only grabbing", "bdnyc.search_object(row['source'], output_table='sources', table_names={'sources': ['designation', 'names']}, fmt='pandas') if len(bd_source) != 1: print(f\"ERROR matching {row['source']}\")", "for reference {row['publication_shortname']}\") new_data = None continue datum = {'source': source, 'band': row['band'],", "source_dict[row['source']] = int(bd_source['id'].values[0]) # Grab only photometry in the band list that has", "and_ from sqlalchemy import types # for BDNYC column overrides verbose = True", "that special parameters have to be passed to allow the BDNYC schema work", "= [s[0] for s in temp] sources = db.query(db.Sources).\\ 
filter(db.Sources.c.source.notin_(sources_with_photometry)).\\ pandas() # Get", "source, bdnyc_id in source_dict.items(): print(f'{source} : {bdnyc_id}') bd_data = bdnyc.query(bdnyc.photometry).\\ filter(and_(bdnyc.photometry.c.source_id == bdnyc_id,", "if len(old_data) > 0: if (row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(), old_data['reference'].tolist()): if verbose: print(f\"{source}:", "'magnitude': row['magnitude'], 'magnitude_error': row['magnitude_unc'], 'telescope': 'WISE', 'reference': row['publication_shortname'], 'epoch': row['epoch'], 'comments': row['comments']} new_data.append(datum)", "filter(and_(bdnyc.photometry.c.source_id == bdnyc_id, bdnyc.photometry.c.publication_shortname.isnot(None), bdnyc.photometry.c.version <= 2, bdnyc.photometry.c.band.in_(band_list))).\\ pandas() if len(bd_data) == 0:", "add photometry from the BDNYC database into SIMPLE from astrodbkit2.astrodb import Database, and_" ]
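# A self-contained sketch (toy data; my restatement, not the script's code) of
# the duplicate check above: a (band, reference) pair already present in
# Photometry means the incoming row is skipped rather than re-ingested.
import pandas as pd

old = pd.DataFrame({'band': ['WISE_W1', 'WISE_W2'],
                    'reference': ['Cutr12', 'Cutr12']})  # toy rows
existing = set(zip(old['band'], old['reference']))
print(('WISE_W1', 'Cutr12') in existing)  # True  -> duplicate, skip
print(('WISE_W3', 'Cutr12') in existing)  # False -> safe to ingest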
[ "import User from .drink_dto import Drinks from .dessert_dto import Dessert from .role_dto import", "User from .drink_dto import Drinks from .dessert_dto import Dessert from .role_dto import Roles", "from .user_dto import User from .drink_dto import Drinks from .dessert_dto import Dessert from", ".user_dto import User from .drink_dto import Drinks from .dessert_dto import Dessert from .role_dto" ]
[ "遇到多个重复字符的时候 # aba: 遇到左右字符重复的时候 # 第一步,找到中心轴 # 第二步,向左向右探 strLen = len(str) left =", "else 1 while right < strLen: if str[left] == str[right]: # aa... while", "max = (right - left + 1) if (right - left + 1)", "max if max >= (strLen - right - 1): return max # 当后面的字符串数量少于最大回文个数时,提前跳出循环", "JohnnyB0Y. All rights reserved. def domain(): # 0 1 2 3 3 5", ">= (strLen - right - 1): return max # 当后面的字符串数量少于最大回文个数时,提前跳出循环 left = right", "探索完成 max = (right - left + 1) if (right - left +", "+ 1) if (right - left + 1) > max else max if", "Copyright © 2021 JohnnyB0Y. All rights reserved. def domain(): # 0 1 2", "not str[right]: # abc ? aba left += 1 continue # 左右探索 while", "\"a\", \"aa\", \"aba\", \"baaa\", \"baaabd\", \"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"] for str in strs: max =", "pass # 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 # 买一本书需要27元 # 问?如何用最少的硬币组合正好付清,且不需要对方找零? def minOfCoinChange(): pass # 字符串中的最长回文(Palindrome) def", "= len(str) left = 0 right = 1 max = 0 if strLen", "# ab... right += 1 if (left < 0 or right >= strLen)", "aba left += 1 continue # 左右探索 while (left > 0 and right", "str in strs: max = maxOfPalindrome(str) print(str, \"maxOfPalindrome:\", max) pass # 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 #", "Created by JohnnyB0Y on 2021/07/11. # Copyright © 2021 JohnnyB0Y. All rights reserved.", "买一本书需要27元 # 问?如何用最少的硬币组合正好付清,且不需要对方找零? def minOfCoinChange(): pass # 字符串中的最长回文(Palindrome) def maxOfPalindrome(str): # aa :", "right + 1 < strLen and str[right] is str[right+1]: # aaa.... right +=", "# 左右探索 while (left > 0 and right + 1 < strLen) and", "- left + 1) > max else max if max >= (strLen -", "right - 1): return max # 当后面的字符串数量少于最大回文个数时,提前跳出循环 left = right right += 1", "= (right - left + 1) if (right - left + 1) >", "left = 0 right = 1 max = 0 if strLen == 0", "right = 1 max = 0 if strLen == 0 else 1 while", "else max if max >= (strLen - right - 1): return max #", "max = maxOfPalindrome(str) print(str, \"maxOfPalindrome:\", max) pass # 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 # 买一本书需要27元 # 问?如何用最少的硬币组合正好付清,且不需要对方找零?", "# 问?如何用最少的硬币组合正好付清,且不需要对方找零? def minOfCoinChange(): pass # 字符串中的最长回文(Palindrome) def maxOfPalindrome(str): # aa : 遇到两个重复字符的时候", "on 2021/07/11. # Copyright © 2021 JohnnyB0Y. All rights reserved. def domain(): #", "3 3 5 12 4 strs = [\"\", \"a\", \"aa\", \"aba\", \"baaa\", \"baaabd\",", ": 遇到两个重复字符的时候 # aaa: 遇到多个重复字符的时候 # aba: 遇到左右字符重复的时候 # 第一步,找到中心轴 # 第二步,向左向右探 strLen", "# 探索完成 max = (right - left + 1) if (right - left", "# # # Created by JohnnyB0Y on 2021/07/11. # Copyright © 2021 JohnnyB0Y.", "2 3 3 5 12 4 strs = [\"\", \"a\", \"aa\", \"aba\", \"baaa\",", "有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 # 买一本书需要27元 # 问?如何用最少的硬币组合正好付清,且不需要对方找零? def minOfCoinChange(): pass # 字符串中的最长回文(Palindrome) def maxOfPalindrome(str): #", "# aaa: 遇到多个重复字符的时候 # aba: 遇到左右字符重复的时候 # 第一步,找到中心轴 # 第二步,向左向右探 strLen = len(str)", "aaa: 遇到多个重复字符的时候 # aba: 遇到左右字符重复的时候 # 第一步,找到中心轴 # 第二步,向左向右探 strLen = len(str) left", "1 while right < strLen: if str[left] == str[right]: # aa... while right", "(right - left + 1) if (right - left + 1) > max", "第二步,向左向右探 strLen = len(str) left = 0 right = 1 max = 0", "JohnnyB0Y on 2021/07/11. # Copyright © 2021 JohnnyB0Y. All rights reserved. def domain():", "+= 1 else: # ab... right += 1 if (left < 0 or", "# abc ? aba left += 1 continue # 左右探索 while (left >", "0 right = 1 max = 0 if strLen == 0 else 1", "by JohnnyB0Y on 2021/07/11. # Copyright © 2021 JohnnyB0Y. All rights reserved. 
def", "domain(): # 0 1 2 3 3 5 12 4 strs = [\"\",", "0 else 1 while right < strLen: if str[left] == str[right]: # aa...", "def domain(): # 0 1 2 3 3 5 12 4 strs =", "# 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 # 买一本书需要27元 # 问?如何用最少的硬币组合正好付清,且不需要对方找零? def minOfCoinChange(): pass # 字符串中的最长回文(Palindrome) def maxOfPalindrome(str):", "minOfCoinChange(): pass # 字符串中的最长回文(Palindrome) def maxOfPalindrome(str): # aa : 遇到两个重复字符的时候 # aaa: 遇到多个重复字符的时候", "[\"\", \"a\", \"aa\", \"aba\", \"baaa\", \"baaabd\", \"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"] for str in strs: max", "# aaa.... right += 1 else: # ab... right += 1 if (left", "All rights reserved. def domain(): # 0 1 2 3 3 5 12", "\"aa\", \"aba\", \"baaa\", \"baaabd\", \"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"] for str in strs: max = maxOfPalindrome(str)", "aa : 遇到两个重复字符的时候 # aaa: 遇到多个重复字符的时候 # aba: 遇到左右字符重复的时候 # 第一步,找到中心轴 # 第二步,向左向右探", "1 continue # 左右探索 while (left > 0 and right + 1 <", "< 0 or right >= strLen) or str[left] is not str[right]: # abc", "# 买一本书需要27元 # 问?如何用最少的硬币组合正好付清,且不需要对方找零? def minOfCoinChange(): pass # 字符串中的最长回文(Palindrome) def maxOfPalindrome(str): # aa", "print(str, \"maxOfPalindrome:\", max) pass # 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 # 买一本书需要27元 # 问?如何用最少的硬币组合正好付清,且不需要对方找零? def minOfCoinChange(): pass", "# -*- coding: utf-8 -* # count.py # # # Created by JohnnyB0Y", "+= 1 continue # 左右探索 while (left > 0 and right + 1", "maxOfPalindrome(str): # aa : 遇到两个重复字符的时候 # aaa: 遇到多个重复字符的时候 # aba: 遇到左右字符重复的时候 # 第一步,找到中心轴", "? aba left += 1 continue # 左右探索 while (left > 0 and", "max # 当后面的字符串数量少于最大回文个数时,提前跳出循环 left = right right += 1 return max # test", "str[left-1] is str[right+1]: left -= 1 right += 1 # 探索完成 max =", "right += 1 else: # ab... right += 1 if (left < 0", "left + 1) > max else max if max >= (strLen - right", "1 right += 1 # 探索完成 max = (right - left + 1)", "(left > 0 and right + 1 < strLen) and str[left-1] is str[right+1]:", "strLen: if str[left] == str[right]: # aa... while right + 1 < strLen", "1 < strLen) and str[left-1] is str[right+1]: left -= 1 right += 1", "str[right]: # abc ? aba left += 1 continue # 左右探索 while (left", "\"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"] for str in strs: max = maxOfPalindrome(str) print(str, \"maxOfPalindrome:\", max) pass", "while (left > 0 and right + 1 < strLen) and str[left-1] is", "# # Created by JohnnyB0Y on 2021/07/11. # Copyright © 2021 JohnnyB0Y. All", "pass # 字符串中的最长回文(Palindrome) def maxOfPalindrome(str): # aa : 遇到两个重复字符的时候 # aaa: 遇到多个重复字符的时候 #", "1 if (left < 0 or right >= strLen) or str[left] is not", "if strLen == 0 else 1 while right < strLen: if str[left] ==", "1 else: # ab... right += 1 if (left < 0 or right", "while right < strLen: if str[left] == str[right]: # aa... while right +", "-* # count.py # # # Created by JohnnyB0Y on 2021/07/11. # Copyright", "coding: utf-8 -* # count.py # # # Created by JohnnyB0Y on 2021/07/11.", "-= 1 right += 1 # 探索完成 max = (right - left +", "- 1): return max # 当后面的字符串数量少于最大回文个数时,提前跳出循环 left = right right += 1 return", "+ 1 < strLen) and str[left-1] is str[right+1]: left -= 1 right +=", "strLen = len(str) left = 0 right = 1 max = 0 if", "0 if strLen == 0 else 1 while right < strLen: if str[left]", "str[left] == str[right]: # aa... while right + 1 < strLen and str[right]", "问?如何用最少的硬币组合正好付清,且不需要对方找零? 
def minOfCoinChange(): pass # 字符串中的最长回文(Palindrome) def maxOfPalindrome(str): # aa : 遇到两个重复字符的时候 #", "\"aaaabcdefghijjjjklmn\"] for str in strs: max = maxOfPalindrome(str) print(str, \"maxOfPalindrome:\", max) pass #", "str[left] is not str[right]: # abc ? aba left += 1 continue #", "right + 1 < strLen) and str[left-1] is str[right+1]: left -= 1 right", "aa... while right + 1 < strLen and str[right] is str[right+1]: # aaa....", "maxOfPalindrome(str) print(str, \"maxOfPalindrome:\", max) pass # 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 # 买一本书需要27元 # 问?如何用最少的硬币组合正好付清,且不需要对方找零? def minOfCoinChange():", "for str in strs: max = maxOfPalindrome(str) print(str, \"maxOfPalindrome:\", max) pass # 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多", "in strs: max = maxOfPalindrome(str) print(str, \"maxOfPalindrome:\", max) pass # 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 # 买一本书需要27元", "(left < 0 or right >= strLen) or str[left] is not str[right]: #", "> 0 and right + 1 < strLen) and str[left-1] is str[right+1]: left", "strLen) and str[left-1] is str[right+1]: left -= 1 right += 1 # 探索完成", "strs = [\"\", \"a\", \"aa\", \"aba\", \"baaa\", \"baaabd\", \"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"] for str in", "= maxOfPalindrome(str) print(str, \"maxOfPalindrome:\", max) pass # 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 # 买一本书需要27元 # 问?如何用最少的硬币组合正好付清,且不需要对方找零? def", "# 第一步,找到中心轴 # 第二步,向左向右探 strLen = len(str) left = 0 right = 1", "-*- coding: utf-8 -* # count.py # # # Created by JohnnyB0Y on", "= 0 right = 1 max = 0 if strLen == 0 else", "and str[right] is str[right+1]: # aaa.... right += 1 else: # ab... right", "if (right - left + 1) > max else max if max >=", "ab... right += 1 if (left < 0 or right >= strLen) or", "is not str[right]: # abc ? aba left += 1 continue # 左右探索", "+ 1) > max else max if max >= (strLen - right -", "str[right]: # aa... while right + 1 < strLen and str[right] is str[right+1]:", "+ 1 < strLen and str[right] is str[right+1]: # aaa.... right += 1", "- left + 1) if (right - left + 1) > max else", "= [\"\", \"a\", \"aa\", \"aba\", \"baaa\", \"baaabd\", \"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"] for str in strs:", "12 4 strs = [\"\", \"a\", \"aa\", \"aba\", \"baaa\", \"baaabd\", \"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"] for", "else: # ab... right += 1 if (left < 0 or right >=", "max >= (strLen - right - 1): return max # 当后面的字符串数量少于最大回文个数时,提前跳出循环 left =", "5 12 4 strs = [\"\", \"a\", \"aa\", \"aba\", \"baaa\", \"baaabd\", \"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"]", "# 字符串中的最长回文(Palindrome) def maxOfPalindrome(str): # aa : 遇到两个重复字符的时候 # aaa: 遇到多个重复字符的时候 # aba:", "\"baaa\", \"baaabd\", \"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"] for str in strs: max = maxOfPalindrome(str) print(str, \"maxOfPalindrome:\",", "# 0 1 2 3 3 5 12 4 strs = [\"\", \"a\",", "is str[right+1]: # aaa.... right += 1 else: # ab... right += 1", "strLen) or str[left] is not str[right]: # abc ? aba left += 1", "and str[left-1] is str[right+1]: left -= 1 right += 1 # 探索完成 max", "- right - 1): return max # 当后面的字符串数量少于最大回文个数时,提前跳出循环 left = right right +=", "1 < strLen and str[right] is str[right+1]: # aaa.... right += 1 else:", "= 0 if strLen == 0 else 1 while right < strLen: if", "or right >= strLen) or str[left] is not str[right]: # abc ? aba", "if str[left] == str[right]: # aa... while right + 1 < strLen and", "# Copyright © 2021 JohnnyB0Y. All rights reserved. 
def domain(): # 0 1", "4 strs = [\"\", \"a\", \"aa\", \"aba\", \"baaa\", \"baaabd\", \"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"] for str", "> max else max if max >= (strLen - right - 1): return", "1 2 3 3 5 12 4 strs = [\"\", \"a\", \"aa\", \"aba\",", "(strLen - right - 1): return max # 当后面的字符串数量少于最大回文个数时,提前跳出循环 left = right right", "def minOfCoinChange(): pass # 字符串中的最长回文(Palindrome) def maxOfPalindrome(str): # aa : 遇到两个重复字符的时候 # aaa:", "0 and right + 1 < strLen) and str[left-1] is str[right+1]: left -=", "reserved. def domain(): # 0 1 2 3 3 5 12 4 strs", "\"aba\", \"baaa\", \"baaabd\", \"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"] for str in strs: max = maxOfPalindrome(str) print(str,", "# aba: 遇到左右字符重复的时候 # 第一步,找到中心轴 # 第二步,向左向右探 strLen = len(str) left = 0", "< strLen) and str[left-1] is str[right+1]: left -= 1 right += 1 #", "第一步,找到中心轴 # 第二步,向左向右探 strLen = len(str) left = 0 right = 1 max", "\"baaabd\", \"kkkfcddddddcfkabckkl\", \"aaaabcdefghijjjjklmn\"] for str in strs: max = maxOfPalindrome(str) print(str, \"maxOfPalindrome:\", max)", "© 2021 JohnnyB0Y. All rights reserved. def domain(): # 0 1 2 3", "strLen == 0 else 1 while right < strLen: if str[left] == str[right]:", "aba: 遇到左右字符重复的时候 # 第一步,找到中心轴 # 第二步,向左向右探 strLen = len(str) left = 0 right", "max = 0 if strLen == 0 else 1 while right < strLen:", "right < strLen: if str[left] == str[right]: # aa... while right + 1", "while right + 1 < strLen and str[right] is str[right+1]: # aaa.... right", "aaa.... right += 1 else: # ab... right += 1 if (left <", "right >= strLen) or str[left] is not str[right]: # abc ? aba left", "== str[right]: # aa... while right + 1 < strLen and str[right] is", "and right + 1 < strLen) and str[left-1] is str[right+1]: left -= 1", "right += 1 # 探索完成 max = (right - left + 1) if", "def maxOfPalindrome(str): # aa : 遇到两个重复字符的时候 # aaa: 遇到多个重复字符的时候 # aba: 遇到左右字符重复的时候 #", "max else max if max >= (strLen - right - 1): return max", "+= 1 if (left < 0 or right >= strLen) or str[left] is", "left -= 1 right += 1 # 探索完成 max = (right - left", "# aa : 遇到两个重复字符的时候 # aaa: 遇到多个重复字符的时候 # aba: 遇到左右字符重复的时候 # 第一步,找到中心轴 #", "len(str) left = 0 right = 1 max = 0 if strLen ==", "# aa... while right + 1 < strLen and str[right] is str[right+1]: #", "# 当后面的字符串数量少于最大回文个数时,提前跳出循环 left = right right += 1 return max # test domain()", "1 max = 0 if strLen == 0 else 1 while right <", "str[right+1]: left -= 1 right += 1 # 探索完成 max = (right -", "1) > max else max if max >= (strLen - right - 1):", "2021/07/11. # Copyright © 2021 JohnnyB0Y. All rights reserved. def domain(): # 0", "字符串中的最长回文(Palindrome) def maxOfPalindrome(str): # aa : 遇到两个重复字符的时候 # aaa: 遇到多个重复字符的时候 # aba: 遇到左右字符重复的时候", "strLen and str[right] is str[right+1]: # aaa.... right += 1 else: # ab...", "= 1 max = 0 if strLen == 0 else 1 while right", "left += 1 continue # 左右探索 while (left > 0 and right +", "str[right+1]: # aaa.... right += 1 else: # ab... right += 1 if", "return max # 当后面的字符串数量少于最大回文个数时,提前跳出循环 left = right right += 1 return max #", "strs: max = maxOfPalindrome(str) print(str, \"maxOfPalindrome:\", max) pass # 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 # 买一本书需要27元 #", "3 5 12 4 strs = [\"\", \"a\", \"aa\", \"aba\", \"baaa\", \"baaabd\", \"kkkfcddddddcfkabckkl\",", "遇到两个重复字符的时候 # aaa: 遇到多个重复字符的时候 # aba: 遇到左右字符重复的时候 # 第一步,找到中心轴 # 第二步,向左向右探 strLen =", "continue # 左右探索 while (left > 0 and right + 1 < strLen)", "left + 1) if (right - left + 1) > max else max", "max) pass # 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 # 买一本书需要27元 # 问?如何用最少的硬币组合正好付清,且不需要对方找零? 
def minOfCoinChange(): pass # 字符串中的最长回文(Palindrome)", "str[right] is str[right+1]: # aaa.... right += 1 else: # ab... right +=", ">= strLen) or str[left] is not str[right]: # abc ? aba left +=", "abc ? aba left += 1 continue # 左右探索 while (left > 0", "if (left < 0 or right >= strLen) or str[left] is not str[right]:", "1 # 探索完成 max = (right - left + 1) if (right -", "遇到左右字符重复的时候 # 第一步,找到中心轴 # 第二步,向左向右探 strLen = len(str) left = 0 right =", "\"maxOfPalindrome:\", max) pass # 有三种硬币,分别面值2元、5元和7元,每种硬币都足够多 # 买一本书需要27元 # 问?如何用最少的硬币组合正好付清,且不需要对方找零? def minOfCoinChange(): pass #", "utf-8 -* # count.py # # # Created by JohnnyB0Y on 2021/07/11. #", "# count.py # # # Created by JohnnyB0Y on 2021/07/11. # Copyright ©", "# Created by JohnnyB0Y on 2021/07/11. # Copyright © 2021 JohnnyB0Y. All rights", "# 第二步,向左向右探 strLen = len(str) left = 0 right = 1 max =", "== 0 else 1 while right < strLen: if str[left] == str[right]: #", "rights reserved. def domain(): # 0 1 2 3 3 5 12 4", "< strLen and str[right] is str[right+1]: # aaa.... right += 1 else: #", "or str[left] is not str[right]: # abc ? aba left += 1 continue", "+= 1 # 探索完成 max = (right - left + 1) if (right", "(right - left + 1) > max else max if max >= (strLen", "is str[right+1]: left -= 1 right += 1 # 探索完成 max = (right", "< strLen: if str[left] == str[right]: # aa... while right + 1 <", "1): return max # 当后面的字符串数量少于最大回文个数时,提前跳出循环 left = right right += 1 return max", "1) if (right - left + 1) > max else max if max", "count.py # # # Created by JohnnyB0Y on 2021/07/11. # Copyright © 2021", "if max >= (strLen - right - 1): return max # 当后面的字符串数量少于最大回文个数时,提前跳出循环 left", "左右探索 while (left > 0 and right + 1 < strLen) and str[left-1]", "right += 1 if (left < 0 or right >= strLen) or str[left]", "0 or right >= strLen) or str[left] is not str[right]: # abc ?", "0 1 2 3 3 5 12 4 strs = [\"\", \"a\", \"aa\",", "2021 JohnnyB0Y. All rights reserved. def domain(): # 0 1 2 3 3" ]
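
minOfCoinChange is left as an empty stub above. A minimal dynamic-programming sketch for the problem its comment describes could look like the following; the function name and its defaults are mine, not part of the original file.

def min_of_coin_change(amount=27, coins=(2, 5, 7)):
    # dp[i] = fewest coins summing to exactly i, or None when i is unreachable
    dp = [0] + [None] * amount
    for i in range(1, amount + 1):
        reachable = [dp[i - c] for c in coins if c <= i and dp[i - c] is not None]
        dp[i] = min(reachable) + 1 if reachable else None
    return dp[amount]

print(min_of_coin_change())  # 5  (e.g. 7 + 5 + 5 + 5 + 5)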
[ "input_bits carry += number bits_count += input_bits while bits_count >= output_bits: number =", "value in data: acc = ((acc << frombits) | value) & max_acc bits", "8, output_bits = 5): \"\"\" Convert an array of N-bits integer into an", "max_acc bits += frombits while bits >= tobits: bits -= tobits ret.append((acc >>", "data: acc = ((acc << frombits) | value) & max_acc bits += frombits", "output_bits if bits_count and output_bits > bits_count: output.append(carry << (output_bits - bits_count)) return", ">= tobits: bits -= tobits ret.append((acc >> bits) & maxv) if pad and", "ret.append((acc >> bits) & maxv) if pad and bits: ret.append((acc << (tobits -", "return bytes(output) def _convertbits(data, frombits, tobits, pad=True): \"\"\"General power-of-2 base conversion.\"\"\" acc =", "bits) & maxv) if pad and bits: ret.append((acc << (tobits - bits)) &", "= [] for number in input: carry = carry << input_bits carry +=", "number bits_count += input_bits while bits_count >= output_bits: number = (carry >> (bits_count", "<< tobits) - 1 max_acc = (1 << (frombits + tobits - 1))", "bits_count and output_bits > bits_count: output.append(carry << (output_bits - bits_count)) return bytes(output) def", "bytes(output) def _convertbits(data, frombits, tobits, pad=True): \"\"\"General power-of-2 base conversion.\"\"\" acc = 0", "bits += frombits while bits >= tobits: bits -= tobits ret.append((acc >> bits)", "ret = bytearray() maxv = (1 << tobits) - 1 max_acc = (1", "\"\"\"General power-of-2 base conversion.\"\"\" acc = 0 bits = 0 ret = bytearray()", "while bits_count >= output_bits: number = (carry >> (bits_count - output_bits)) output.append(number) carry", "-= output_bits if bits_count and output_bits > bits_count: output.append(carry << (output_bits - bits_count))", "in data: acc = ((acc << frombits) | value) & max_acc bits +=", "carry << input_bits carry += number bits_count += input_bits while bits_count >= output_bits:", "an array of N'-bits integers \"\"\" carry = 0 bits_count = 0 output", "bits_count = 0 output = [] for number in input: carry = carry", "frombits, tobits, pad=True): \"\"\"General power-of-2 base conversion.\"\"\" acc = 0 bits = 0", "carry = carry << input_bits carry += number bits_count += input_bits while bits_count", "-= (number << bits_count - output_bits) bits_count -= output_bits if bits_count and output_bits", "frombits while bits >= tobits: bits -= tobits ret.append((acc >> bits) & maxv)", "- output_bits) bits_count -= output_bits if bits_count and output_bits > bits_count: output.append(carry <<", "if pad and bits: ret.append((acc << (tobits - bits)) & maxv) return ret", "acc = ((acc << frombits) | value) & max_acc bits += frombits while", "0 bits_count = 0 output = [] for number in input: carry =", "<< input_bits carry += number bits_count += input_bits while bits_count >= output_bits: number", "output_bits = 5): \"\"\" Convert an array of N-bits integer into an array", "[] for number in input: carry = carry << input_bits carry += number", "= carry << input_bits carry += number bits_count += input_bits while bits_count >=", "(1 << (frombits + tobits - 1)) - 1 for value in data:", "(number << bits_count - output_bits) bits_count -= output_bits if bits_count and output_bits >", "<< (frombits + tobits - 1)) - 1 for value in data: acc", "output_bits > bits_count: output.append(carry << (output_bits - bits_count)) return bytes(output) def _convertbits(data, frombits,", "= 0 ret = bytearray() maxv = (1 << tobits) - 1 max_acc", "= 5): \"\"\" Convert an array of N-bits integer 
into an array of", "for number in input: carry = carry << input_bits carry += number bits_count", "def _convertbits(data, frombits, tobits, pad=True): \"\"\"General power-of-2 base conversion.\"\"\" acc = 0 bits", "= ((acc << frombits) | value) & max_acc bits += frombits while bits", "conversion.\"\"\" acc = 0 bits = 0 ret = bytearray() maxv = (1", "= (carry >> (bits_count - output_bits)) output.append(number) carry -= (number << bits_count -", "input: carry = carry << input_bits carry += number bits_count += input_bits while", "= 0 output = [] for number in input: carry = carry <<", "0 output = [] for number in input: carry = carry << input_bits", "bits_count -= output_bits if bits_count and output_bits > bits_count: output.append(carry << (output_bits -", "array of N'-bits integers \"\"\" carry = 0 bits_count = 0 output =", "an array of N-bits integer into an array of N'-bits integers \"\"\" carry", "and output_bits > bits_count: output.append(carry << (output_bits - bits_count)) return bytes(output) def _convertbits(data,", "bits -= tobits ret.append((acc >> bits) & maxv) if pad and bits: ret.append((acc", "& maxv) if pad and bits: ret.append((acc << (tobits - bits)) & maxv)", "input_bits while bits_count >= output_bits: number = (carry >> (bits_count - output_bits)) output.append(number)", "maxv = (1 << tobits) - 1 max_acc = (1 << (frombits +", "while bits >= tobits: bits -= tobits ret.append((acc >> bits) & maxv) if", "output.append(carry << (output_bits - bits_count)) return bytes(output) def _convertbits(data, frombits, tobits, pad=True): \"\"\"General", "acc = 0 bits = 0 ret = bytearray() maxv = (1 <<", "output_bits: number = (carry >> (bits_count - output_bits)) output.append(number) carry -= (number <<", ">> bits) & maxv) if pad and bits: ret.append((acc << (tobits - bits))", "(bits_count - output_bits)) output.append(number) carry -= (number << bits_count - output_bits) bits_count -=", ">= output_bits: number = (carry >> (bits_count - output_bits)) output.append(number) carry -= (number", "+= frombits while bits >= tobits: bits -= tobits ret.append((acc >> bits) &", "output_bits)) output.append(number) carry -= (number << bits_count - output_bits) bits_count -= output_bits if", "+= input_bits while bits_count >= output_bits: number = (carry >> (bits_count - output_bits))", "+ tobits - 1)) - 1 for value in data: acc = ((acc", "- bits_count)) return bytes(output) def _convertbits(data, frombits, tobits, pad=True): \"\"\"General power-of-2 base conversion.\"\"\"", "0 ret = bytearray() maxv = (1 << tobits) - 1 max_acc =", "& max_acc bits += frombits while bits >= tobits: bits -= tobits ret.append((acc", "\"\"\" carry = 0 bits_count = 0 output = [] for number in", "tobits, pad=True): \"\"\"General power-of-2 base conversion.\"\"\" acc = 0 bits = 0 ret", "tobits) - 1 max_acc = (1 << (frombits + tobits - 1)) -", "<< frombits) | value) & max_acc bits += frombits while bits >= tobits:", "to_n_bits(input, input_bits = 8, output_bits = 5): \"\"\" Convert an array of N-bits", "1 for value in data: acc = ((acc << frombits) | value) &", "| value) & max_acc bits += frombits while bits >= tobits: bits -=", "output.append(number) carry -= (number << bits_count - output_bits) bits_count -= output_bits if bits_count", "bits_count: output.append(carry << (output_bits - bits_count)) return bytes(output) def _convertbits(data, frombits, tobits, pad=True):", "number = (carry >> (bits_count - output_bits)) output.append(number) carry -= (number << bits_count", "integer into an array of N'-bits integers 
\"\"\" carry = 0 bits_count =", "power-of-2 base conversion.\"\"\" acc = 0 bits = 0 ret = bytearray() maxv", "= (1 << (frombits + tobits - 1)) - 1 for value in", "0 bits = 0 ret = bytearray() maxv = (1 << tobits) -", "tobits - 1)) - 1 for value in data: acc = ((acc <<", "integers \"\"\" carry = 0 bits_count = 0 output = [] for number", "carry -= (number << bits_count - output_bits) bits_count -= output_bits if bits_count and", "= bytearray() maxv = (1 << tobits) - 1 max_acc = (1 <<", "(output_bits - bits_count)) return bytes(output) def _convertbits(data, frombits, tobits, pad=True): \"\"\"General power-of-2 base", "= (1 << tobits) - 1 max_acc = (1 << (frombits + tobits", "bits_count >= output_bits: number = (carry >> (bits_count - output_bits)) output.append(number) carry -=", "<< (output_bits - bits_count)) return bytes(output) def _convertbits(data, frombits, tobits, pad=True): \"\"\"General power-of-2", "(1 << tobits) - 1 max_acc = (1 << (frombits + tobits -", "bits >= tobits: bits -= tobits ret.append((acc >> bits) & maxv) if pad", "bits_count - output_bits) bits_count -= output_bits if bits_count and output_bits > bits_count: output.append(carry", "N'-bits integers \"\"\" carry = 0 bits_count = 0 output = [] for", "number in input: carry = carry << input_bits carry += number bits_count +=", "input_bits = 8, output_bits = 5): \"\"\" Convert an array of N-bits integer", "+= number bits_count += input_bits while bits_count >= output_bits: number = (carry >>", "output_bits) bits_count -= output_bits if bits_count and output_bits > bits_count: output.append(carry << (output_bits", "maxv) if pad and bits: ret.append((acc << (tobits - bits)) & maxv) return", ">> (bits_count - output_bits)) output.append(number) carry -= (number << bits_count - output_bits) bits_count", "into an array of N'-bits integers \"\"\" carry = 0 bits_count = 0", "bits_count += input_bits while bits_count >= output_bits: number = (carry >> (bits_count -", "for value in data: acc = ((acc << frombits) | value) & max_acc", "base conversion.\"\"\" acc = 0 bits = 0 ret = bytearray() maxv =", "- 1)) - 1 for value in data: acc = ((acc << frombits)", "Convert an array of N-bits integer into an array of N'-bits integers \"\"\"", "1 max_acc = (1 << (frombits + tobits - 1)) - 1 for", "if bits_count and output_bits > bits_count: output.append(carry << (output_bits - bits_count)) return bytes(output)", "output = [] for number in input: carry = carry << input_bits carry", "max_acc = (1 << (frombits + tobits - 1)) - 1 for value", "value) & max_acc bits += frombits while bits >= tobits: bits -= tobits", "= 8, output_bits = 5): \"\"\" Convert an array of N-bits integer into", "= 0 bits_count = 0 output = [] for number in input: carry", "(carry >> (bits_count - output_bits)) output.append(number) carry -= (number << bits_count - output_bits)", "tobits ret.append((acc >> bits) & maxv) if pad and bits: ret.append((acc << (tobits", "N-bits integer into an array of N'-bits integers \"\"\" carry = 0 bits_count", "= 0 bits = 0 ret = bytearray() maxv = (1 << tobits)", "> bits_count: output.append(carry << (output_bits - bits_count)) return bytes(output) def _convertbits(data, frombits, tobits,", "of N-bits integer into an array of N'-bits integers \"\"\" carry = 0", "- output_bits)) output.append(number) carry -= (number << bits_count - output_bits) bits_count -= output_bits", "(frombits + tobits - 1)) - 1 for value in data: acc =", "carry += number bits_count += input_bits while bits_count >= output_bits: number = (carry", "def 
to_n_bits(input, input_bits = 8, output_bits = 5): \"\"\" Convert an array of", "- 1 for value in data: acc = ((acc << frombits) | value)", "- 1 max_acc = (1 << (frombits + tobits - 1)) - 1", "of N'-bits integers \"\"\" carry = 0 bits_count = 0 output = []", "tobits: bits -= tobits ret.append((acc >> bits) & maxv) if pad and bits:", "<< bits_count - output_bits) bits_count -= output_bits if bits_count and output_bits > bits_count:", "\"\"\" Convert an array of N-bits integer into an array of N'-bits integers", "-= tobits ret.append((acc >> bits) & maxv) if pad and bits: ret.append((acc <<", "1)) - 1 for value in data: acc = ((acc << frombits) |", "bits = 0 ret = bytearray() maxv = (1 << tobits) - 1", "in input: carry = carry << input_bits carry += number bits_count += input_bits", "pad=True): \"\"\"General power-of-2 base conversion.\"\"\" acc = 0 bits = 0 ret =", "bits_count)) return bytes(output) def _convertbits(data, frombits, tobits, pad=True): \"\"\"General power-of-2 base conversion.\"\"\" acc", "5): \"\"\" Convert an array of N-bits integer into an array of N'-bits", "_convertbits(data, frombits, tobits, pad=True): \"\"\"General power-of-2 base conversion.\"\"\" acc = 0 bits =", "carry = 0 bits_count = 0 output = [] for number in input:", "bytearray() maxv = (1 << tobits) - 1 max_acc = (1 << (frombits", "((acc << frombits) | value) & max_acc bits += frombits while bits >=", "frombits) | value) & max_acc bits += frombits while bits >= tobits: bits", "array of N-bits integer into an array of N'-bits integers \"\"\" carry =" ]
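
Both converters regroup a bit stream, so on the same input they should agree; a quick sanity check with two arbitrary bytes (16 bits become four 5-bit groups, the last one zero-padded):

data = bytes([0xFF, 0x01])              # 11111111 00000001
print(list(to_n_bits(data)))            # [31, 28, 0, 16]
print(list(_convertbits(data, 8, 5)))   # [31, 28, 0, 16] -- same grouping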
[ "terms = json.load(json_file) terms = terms[\"search\"] for term in terms: self.tag = term", "date[14:-11].replace(\" \", \"\") date = datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date) mpdf = make_pdf( str(self.name), str(indian_express_link),", "IndianExpressSpider(scrapy.Spider): name = \"indian_express\" allowed_domains = [config.INDIAN_EXPRESS_ROOT] tag = \"\" def start_requests(self): with", "date_list = response_link.css(\"time::text\").getall() date_list.reverse() date = str(date_list[0]) date = date[14:-11].replace(\" \", \"\") date", "generate_links as generate from newspaper.spiders.makepdf import make_pdf class IndianExpressSpider(scrapy.Spider): name = \"indian_express\" allowed_domains", "name.replace(\" \", \"_\") indian_express_link = str(anchor) try: date_list = response_link.css(\"time::text\").getall() date_list.reverse() date =", "date = datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date) mpdf = make_pdf( str(self.name), str(indian_express_link), str(date), str(self.tag), str(article_name),", "import scrapy from yarl import URL from datetime import datetime import json import", "= response.css(\"div.details\") for response_link in response_links: anchor = response_link.css(\"h3 a::attr(href)\").get() name = response_link.css(\"h3", "\"\") date = datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date) mpdf = make_pdf( str(self.name), str(indian_express_link), str(date), str(self.tag),", "terms = terms[\"search\"] for term in terms: self.tag = term urls = generate(self.name,", "print(date) mpdf = make_pdf( str(self.name), str(indian_express_link), str(date), str(self.tag), str(article_name), ) mpdf.print() except IndexError:", "= response_link.css(\"h3 a::attr(href)\").get() name = response_link.css(\"h3 a::text\").get() article_name = name.replace(\" \", \"_\") indian_express_link", "self.tag = term urls = generate(self.name, term) for url in urls: yield scrapy.Request(url,", "in terms: self.tag = term urls = generate(self.name, term) for url in urls:", "= term urls = generate(self.name, term) for url in urls: yield scrapy.Request(url, self.parse)", "scrapy.Request(url, self.parse) def parse(self, response): response_links = response.css(\"div.details\") for response_link in response_links: anchor", "date = date[14:-11].replace(\" \", \"\") date = datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date) mpdf = make_pdf(", "newspaper.spiders.makepdf import make_pdf class IndianExpressSpider(scrapy.Spider): name = \"indian_express\" allowed_domains = [config.INDIAN_EXPRESS_ROOT] tag =", "scrapy from yarl import URL from datetime import datetime import json import newspaper.spiders.config", "str(date_list[0]) date = date[14:-11].replace(\" \", \"\") date = datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date) mpdf =", "#!/usr/bin/python3 import scrapy from yarl import URL from datetime import datetime import json", "date = str(date_list[0]) date = date[14:-11].replace(\" \", \"\") date = datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date)", "from newspaper.spiders.generate_links import generate_links as generate from newspaper.spiders.makepdf import make_pdf class IndianExpressSpider(scrapy.Spider): name", "json_file: terms = json.load(json_file) terms = terms[\"search\"] for term in terms: self.tag =", "for url in urls: yield scrapy.Request(url, self.parse) def parse(self, response): response_links = response.css(\"div.details\")", "config from 
newspaper.spiders.generate_links import generate_links as generate from newspaper.spiders.makepdf import make_pdf class IndianExpressSpider(scrapy.Spider):", "name = response_link.css(\"h3 a::text\").get() article_name = name.replace(\" \", \"_\") indian_express_link = str(anchor) try:", "with open(config.JSON_FILE) as json_file: terms = json.load(json_file) terms = terms[\"search\"] for term in", "\"indian_express\" allowed_domains = [config.INDIAN_EXPRESS_ROOT] tag = \"\" def start_requests(self): with open(config.JSON_FILE) as json_file:", "response): response_links = response.css(\"div.details\") for response_link in response_links: anchor = response_link.css(\"h3 a::attr(href)\").get() name", "import newspaper.spiders.config as config from newspaper.spiders.generate_links import generate_links as generate from newspaper.spiders.makepdf import", "\", \"_\") indian_express_link = str(anchor) try: date_list = response_link.css(\"time::text\").getall() date_list.reverse() date = str(date_list[0])", "mpdf = make_pdf( str(self.name), str(indian_express_link), str(date), str(self.tag), str(article_name), ) mpdf.print() except IndexError: pass", "generate from newspaper.spiders.makepdf import make_pdf class IndianExpressSpider(scrapy.Spider): name = \"indian_express\" allowed_domains = [config.INDIAN_EXPRESS_ROOT]", "try: date_list = response_link.css(\"time::text\").getall() date_list.reverse() date = str(date_list[0]) date = date[14:-11].replace(\" \", \"\")", "json import newspaper.spiders.config as config from newspaper.spiders.generate_links import generate_links as generate from newspaper.spiders.makepdf", "as generate from newspaper.spiders.makepdf import make_pdf class IndianExpressSpider(scrapy.Spider): name = \"indian_express\" allowed_domains =", "make_pdf class IndianExpressSpider(scrapy.Spider): name = \"indian_express\" allowed_domains = [config.INDIAN_EXPRESS_ROOT] tag = \"\" def", "generate(self.name, term) for url in urls: yield scrapy.Request(url, self.parse) def parse(self, response): response_links", "\"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date) mpdf = make_pdf( str(self.name), str(indian_express_link), str(date), str(self.tag), str(article_name), ) mpdf.print() except", "= str(anchor) try: date_list = response_link.css(\"time::text\").getall() date_list.reverse() date = str(date_list[0]) date = date[14:-11].replace(\"", "def start_requests(self): with open(config.JSON_FILE) as json_file: terms = json.load(json_file) terms = terms[\"search\"] for", "self.parse) def parse(self, response): response_links = response.css(\"div.details\") for response_link in response_links: anchor =", "tag = \"\" def start_requests(self): with open(config.JSON_FILE) as json_file: terms = json.load(json_file) terms", "as json_file: terms = json.load(json_file) terms = terms[\"search\"] for term in terms: self.tag", "def parse(self, response): response_links = response.css(\"div.details\") for response_link in response_links: anchor = response_link.css(\"h3", "from yarl import URL from datetime import datetime import json import newspaper.spiders.config as", "\"\" def start_requests(self): with open(config.JSON_FILE) as json_file: terms = json.load(json_file) terms = terms[\"search\"]", "response_link.css(\"time::text\").getall() date_list.reverse() date = str(date_list[0]) date = date[14:-11].replace(\" \", \"\") date = datetime.strptime(date,", "str(anchor) try: date_list = response_link.css(\"time::text\").getall() date_list.reverse() date = str(date_list[0]) date = date[14:-11].replace(\" 
\",", "parse(self, response): response_links = response.css(\"div.details\") for response_link in response_links: anchor = response_link.css(\"h3 a::attr(href)\").get()", "terms: self.tag = term urls = generate(self.name, term) for url in urls: yield", "= date[14:-11].replace(\" \", \"\") date = datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date) mpdf = make_pdf( str(self.name),", "= json.load(json_file) terms = terms[\"search\"] for term in terms: self.tag = term urls", "for response_link in response_links: anchor = response_link.css(\"h3 a::attr(href)\").get() name = response_link.css(\"h3 a::text\").get() article_name", "term in terms: self.tag = term urls = generate(self.name, term) for url in", "term) for url in urls: yield scrapy.Request(url, self.parse) def parse(self, response): response_links =", "response_link.css(\"h3 a::attr(href)\").get() name = response_link.css(\"h3 a::text\").get() article_name = name.replace(\" \", \"_\") indian_express_link =", "\", \"\") date = datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date) mpdf = make_pdf( str(self.name), str(indian_express_link), str(date),", "in urls: yield scrapy.Request(url, self.parse) def parse(self, response): response_links = response.css(\"div.details\") for response_link", "as config from newspaper.spiders.generate_links import generate_links as generate from newspaper.spiders.makepdf import make_pdf class", "json.load(json_file) terms = terms[\"search\"] for term in terms: self.tag = term urls =", "datetime import datetime import json import newspaper.spiders.config as config from newspaper.spiders.generate_links import generate_links", "import generate_links as generate from newspaper.spiders.makepdf import make_pdf class IndianExpressSpider(scrapy.Spider): name = \"indian_express\"", "\"_\") indian_express_link = str(anchor) try: date_list = response_link.css(\"time::text\").getall() date_list.reverse() date = str(date_list[0]) date", "name = \"indian_express\" allowed_domains = [config.INDIAN_EXPRESS_ROOT] tag = \"\" def start_requests(self): with open(config.JSON_FILE)", "article_name = name.replace(\" \", \"_\") indian_express_link = str(anchor) try: date_list = response_link.css(\"time::text\").getall() date_list.reverse()", "for term in terms: self.tag = term urls = generate(self.name, term) for url", "term urls = generate(self.name, term) for url in urls: yield scrapy.Request(url, self.parse) def", "class IndianExpressSpider(scrapy.Spider): name = \"indian_express\" allowed_domains = [config.INDIAN_EXPRESS_ROOT] tag = \"\" def start_requests(self):", "yield scrapy.Request(url, self.parse) def parse(self, response): response_links = response.css(\"div.details\") for response_link in response_links:", "a::attr(href)\").get() name = response_link.css(\"h3 a::text\").get() article_name = name.replace(\" \", \"_\") indian_express_link = str(anchor)", "yarl import URL from datetime import datetime import json import newspaper.spiders.config as config", "response_link.css(\"h3 a::text\").get() article_name = name.replace(\" \", \"_\") indian_express_link = str(anchor) try: date_list =", "newspaper.spiders.generate_links import generate_links as generate from newspaper.spiders.makepdf import make_pdf class IndianExpressSpider(scrapy.Spider): name =", "= \"\" def start_requests(self): with open(config.JSON_FILE) as json_file: terms = json.load(json_file) terms =", "= response_link.css(\"time::text\").getall() date_list.reverse() date = str(date_list[0]) date = 
date[14:-11].replace(\" \", \"\") date =", "= name.replace(\" \", \"_\") indian_express_link = str(anchor) try: date_list = response_link.css(\"time::text\").getall() date_list.reverse() date", "urls = generate(self.name, term) for url in urls: yield scrapy.Request(url, self.parse) def parse(self,", "datetime import json import newspaper.spiders.config as config from newspaper.spiders.generate_links import generate_links as generate", "import URL from datetime import datetime import json import newspaper.spiders.config as config from", "response_links: anchor = response_link.css(\"h3 a::attr(href)\").get() name = response_link.css(\"h3 a::text\").get() article_name = name.replace(\" \",", "newspaper.spiders.config as config from newspaper.spiders.generate_links import generate_links as generate from newspaper.spiders.makepdf import make_pdf", "from newspaper.spiders.makepdf import make_pdf class IndianExpressSpider(scrapy.Spider): name = \"indian_express\" allowed_domains = [config.INDIAN_EXPRESS_ROOT] tag", "import make_pdf class IndianExpressSpider(scrapy.Spider): name = \"indian_express\" allowed_domains = [config.INDIAN_EXPRESS_ROOT] tag = \"\"", "response_links = response.css(\"div.details\") for response_link in response_links: anchor = response_link.css(\"h3 a::attr(href)\").get() name =", "indian_express_link = str(anchor) try: date_list = response_link.css(\"time::text\").getall() date_list.reverse() date = str(date_list[0]) date =", "in response_links: anchor = response_link.css(\"h3 a::attr(href)\").get() name = response_link.css(\"h3 a::text\").get() article_name = name.replace(\"", "terms[\"search\"] for term in terms: self.tag = term urls = generate(self.name, term) for", "urls: yield scrapy.Request(url, self.parse) def parse(self, response): response_links = response.css(\"div.details\") for response_link in", "= datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date) mpdf = make_pdf( str(self.name), str(indian_express_link), str(date), str(self.tag), str(article_name), )", "response.css(\"div.details\") for response_link in response_links: anchor = response_link.css(\"h3 a::attr(href)\").get() name = response_link.css(\"h3 a::text\").get()", "= str(date_list[0]) date = date[14:-11].replace(\" \", \"\") date = datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date) mpdf", "= response_link.css(\"h3 a::text\").get() article_name = name.replace(\" \", \"_\") indian_express_link = str(anchor) try: date_list", "= generate(self.name, term) for url in urls: yield scrapy.Request(url, self.parse) def parse(self, response):", "[config.INDIAN_EXPRESS_ROOT] tag = \"\" def start_requests(self): with open(config.JSON_FILE) as json_file: terms = json.load(json_file)", "URL from datetime import datetime import json import newspaper.spiders.config as config from newspaper.spiders.generate_links", "anchor = response_link.css(\"h3 a::attr(href)\").get() name = response_link.css(\"h3 a::text\").get() article_name = name.replace(\" \", \"_\")", "= \"indian_express\" allowed_domains = [config.INDIAN_EXPRESS_ROOT] tag = \"\" def start_requests(self): with open(config.JSON_FILE) as", "from datetime import datetime import json import newspaper.spiders.config as config from newspaper.spiders.generate_links import", "start_requests(self): with open(config.JSON_FILE) as json_file: terms = json.load(json_file) terms = terms[\"search\"] for term", "url in urls: yield scrapy.Request(url, self.parse) def parse(self, response): response_links = response.css(\"div.details\") 
for", "= terms[\"search\"] for term in terms: self.tag = term urls = generate(self.name, term)", "response_link in response_links: anchor = response_link.css(\"h3 a::attr(href)\").get() name = response_link.css(\"h3 a::text\").get() article_name =", "date_list.reverse() date = str(date_list[0]) date = date[14:-11].replace(\" \", \"\") date = datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\")", "= [config.INDIAN_EXPRESS_ROOT] tag = \"\" def start_requests(self): with open(config.JSON_FILE) as json_file: terms =", "datetime.strptime(date, \"%B%d,%Y\").strftime(\"%Y-%b-%d\") print(date) mpdf = make_pdf( str(self.name), str(indian_express_link), str(date), str(self.tag), str(article_name), ) mpdf.print()", "import datetime import json import newspaper.spiders.config as config from newspaper.spiders.generate_links import generate_links as", "allowed_domains = [config.INDIAN_EXPRESS_ROOT] tag = \"\" def start_requests(self): with open(config.JSON_FILE) as json_file: terms", "import json import newspaper.spiders.config as config from newspaper.spiders.generate_links import generate_links as generate from", "open(config.JSON_FILE) as json_file: terms = json.load(json_file) terms = terms[\"search\"] for term in terms:", "a::text\").get() article_name = name.replace(\" \", \"_\") indian_express_link = str(anchor) try: date_list = response_link.css(\"time::text\").getall()" ]
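
The slice date[14:-11] assumes a fixed prefix and suffix around the date in the scraped <time> text, and the replace() then strips the remaining spaces. Whatever reaches strptime must match "%B%d,%Y"; a standalone check of just the format strings, with a made-up sample of that shape:

from datetime import datetime

raw = "July11,2021"  # the shape the slice-and-replace is expected to produce
print(datetime.strptime(raw, "%B%d,%Y").strftime("%Y-%b-%d"))  # 2021-Jul-11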
[ "msg.type is aiohttp.WSMsgType.TEXT: await self.event_catch(msg) elif msg.type is aiohttp.WSMsgType.ERROR: raise msg.data async def", "self.ws = ws self.token = client.token self.client = client self.closed = self.ws.closed @classmethod", "client.token self.client = client self.closed = self.ws.closed @classmethod async def start_gateway(cls, client): url", "elif msg.type is aiohttp.WSMsgType.ERROR: raise msg.data async def event_catch(self, msg): data = msg.json()", "self.token = client.token self.client = client self.closed = self.ws.closed @classmethod async def start_gateway(cls,", "def catch_message(self): async for msg in self.ws: if msg.type is aiohttp.WSMsgType.TEXT: await self.event_catch(msg)", "interval = self.interval) await self.send(self.keepalive.get_data()) self.keepalive.start() await self.login() self.sequence = data[\"s\"] self.client.dispatch(\"gateway_response\", data[\"t\"],", "catch_message(self): async for msg in self.ws: if msg.type is aiohttp.WSMsgType.TEXT: await self.event_catch(msg) elif", "data[\"op\"] == 10: self.interval = data[\"d\"]['heartbeat_interval'] / 1000.0 self.keepalive = KeepAlive(ws = self,", "await client.ws_connect(url + \"&encoding=json\") self = cls(client, ws) return self async def login(self):", "2, \"d\": { \"token\": <PASSWORD>.token, \"intents\": 513, \"properties\": { \"$os\": sys.platform, \"$browser\": \"discord-api.py\",", "await client.request(\"GET\", \"/gateway\")[\"url\"] ws = await client.ws_connect(url + \"&encoding=json\") self = cls(client, ws)", "data = msg.json() if data[\"op\"] != 0: if data[\"op\"] == 10: self.interval =", "msg.data async def event_catch(self, msg): data = msg.json() if data[\"op\"] != 0: if", "if msg.type is aiohttp.WSMsgType.TEXT: await self.event_catch(msg) elif msg.type is aiohttp.WSMsgType.ERROR: raise msg.data async", "= data[\"d\"]['heartbeat_interval'] / 1000.0 self.keepalive = KeepAlive(ws = self, interval = self.interval) await", "= client self.closed = self.ws.closed @classmethod async def start_gateway(cls, client): url = await", "= self.interval) await self.send(self.keepalive.get_data()) self.keepalive.start() await self.login() self.sequence = data[\"s\"] self.client.dispatch(\"gateway_response\", data[\"t\"], data[\"d\"])", "{ \"$os\": sys.platform, \"$browser\": \"discord-api.py\", \"$device\": \"discord-api.py\" } } } await self.send(payload) async", "} await self.send(payload) async def send(self, data:dict): await self.ws.send_json(data) async def catch_message(self): async", "= KeepAlive(ws = self, interval = self.interval) await self.send(self.keepalive.get_data()) self.keepalive.start() await self.login() self.sequence", "client, ws): self.ws = ws self.token = client.token self.client = client self.closed =", "ws) return self async def login(self): payload = { \"op\": 2, \"d\": {", "def login(self): payload = { \"op\": 2, \"d\": { \"token\": <PASSWORD>.token, \"intents\": 513,", "msg.type is aiohttp.WSMsgType.ERROR: raise msg.data async def event_catch(self, msg): data = msg.json() if", "client self.closed = self.ws.closed @classmethod async def start_gateway(cls, client): url = await client.request(\"GET\",", "async for msg in self.ws: if msg.type is aiohttp.WSMsgType.TEXT: await self.event_catch(msg) elif msg.type", "start_gateway(cls, client): url = await client.request(\"GET\", \"/gateway\")[\"url\"] ws = await client.ws_connect(url + \"&encoding=json\")", "self.client = client self.closed = self.ws.closed @classmethod async def start_gateway(cls, client): url =", "url = await 
client.request(\"GET\", \"/gateway\")[\"url\"] ws = await client.ws_connect(url + \"&encoding=json\") self =", "= cls(client, ws) return self async def login(self): payload = { \"op\": 2,", "def send(self, data:dict): await self.ws.send_json(data) async def catch_message(self): async for msg in self.ws:", "send(self, data:dict): await self.ws.send_json(data) async def catch_message(self): async for msg in self.ws: if", "\"discord-api.py\", \"$device\": \"discord-api.py\" } } } await self.send(payload) async def send(self, data:dict): await", "DiscordGateway: def __init__(self, client, ws): self.ws = ws self.token = client.token self.client =", "\"discord-api.py\" } } } await self.send(payload) async def send(self, data:dict): await self.ws.send_json(data) async", "if data[\"op\"] == 10: self.interval = data[\"d\"]['heartbeat_interval'] / 1000.0 self.keepalive = KeepAlive(ws =", "await self.event_catch(msg) elif msg.type is aiohttp.WSMsgType.ERROR: raise msg.data async def event_catch(self, msg): data", "aiohttp.WSMsgType.TEXT: await self.event_catch(msg) elif msg.type is aiohttp.WSMsgType.ERROR: raise msg.data async def event_catch(self, msg):", "for msg in self.ws: if msg.type is aiohttp.WSMsgType.TEXT: await self.event_catch(msg) elif msg.type is", "self.interval = data[\"d\"]['heartbeat_interval'] / 1000.0 self.keepalive = KeepAlive(ws = self, interval = self.interval)", "if data[\"op\"] != 0: if data[\"op\"] == 10: self.interval = data[\"d\"]['heartbeat_interval'] / 1000.0", "__init__(self, client, ws): self.ws = ws self.token = client.token self.client = client self.closed", "= ws self.token = client.token self.client = client self.closed = self.ws.closed @classmethod async", "client): url = await client.request(\"GET\", \"/gateway\")[\"url\"] ws = await client.ws_connect(url + \"&encoding=json\") self", "\"$os\": sys.platform, \"$browser\": \"discord-api.py\", \"$device\": \"discord-api.py\" } } } await self.send(payload) async def", "\"$device\": \"discord-api.py\" } } } await self.send(payload) async def send(self, data:dict): await self.ws.send_json(data)", "= self, interval = self.interval) await self.send(self.keepalive.get_data()) self.keepalive.start() await self.login() self.sequence = data[\"s\"]", "msg.json() if data[\"op\"] != 0: if data[\"op\"] == 10: self.interval = data[\"d\"]['heartbeat_interval'] /", "async def send(self, data:dict): await self.ws.send_json(data) async def catch_message(self): async for msg in", "KeepAlive(ws = self, interval = self.interval) await self.send(self.keepalive.get_data()) self.keepalive.start() await self.login() self.sequence =", "self, interval = self.interval) await self.send(self.keepalive.get_data()) self.keepalive.start() await self.login() self.sequence = data[\"s\"] self.client.dispatch(\"gateway_response\",", "self.send(payload) async def send(self, data:dict): await self.ws.send_json(data) async def catch_message(self): async for msg", "+ \"&encoding=json\") self = cls(client, ws) return self async def login(self): payload =", "self.ws.send_json(data) async def catch_message(self): async for msg in self.ws: if msg.type is aiohttp.WSMsgType.TEXT:", "login(self): payload = { \"op\": 2, \"d\": { \"token\": <PASSWORD>.token, \"intents\": 513, \"properties\":", "aiohttp.WSMsgType.ERROR: raise msg.data async def event_catch(self, msg): data = msg.json() if data[\"op\"] !=", "self = cls(client, ws) return self async def login(self): payload = { \"op\":", "= await client.ws_connect(url + \"&encoding=json\") self = cls(client, ws) return 
self async def", "is aiohttp.WSMsgType.TEXT: await self.event_catch(msg) elif msg.type is aiohttp.WSMsgType.ERROR: raise msg.data async def event_catch(self,", "data:dict): await self.ws.send_json(data) async def catch_message(self): async for msg in self.ws: if msg.type", "class DiscordGateway: def __init__(self, client, ws): self.ws = ws self.token = client.token self.client", "def start_gateway(cls, client): url = await client.request(\"GET\", \"/gateway\")[\"url\"] ws = await client.ws_connect(url +", "data[\"d\"]['heartbeat_interval'] / 1000.0 self.keepalive = KeepAlive(ws = self, interval = self.interval) await self.send(self.keepalive.get_data())", "} } } await self.send(payload) async def send(self, data:dict): await self.ws.send_json(data) async def", "10: self.interval = data[\"d\"]['heartbeat_interval'] / 1000.0 self.keepalive = KeepAlive(ws = self, interval =", "..gateway import KeepAlive class DiscordGateway: def __init__(self, client, ws): self.ws = ws self.token", "sys.platform, \"$browser\": \"discord-api.py\", \"$device\": \"discord-api.py\" } } } await self.send(payload) async def send(self,", "\"properties\": { \"$os\": sys.platform, \"$browser\": \"discord-api.py\", \"$device\": \"discord-api.py\" } } } await self.send(payload)", "} } await self.send(payload) async def send(self, data:dict): await self.ws.send_json(data) async def catch_message(self):", "async def catch_message(self): async for msg in self.ws: if msg.type is aiohttp.WSMsgType.TEXT: await", "1000.0 self.keepalive = KeepAlive(ws = self, interval = self.interval) await self.send(self.keepalive.get_data()) self.keepalive.start() await", "client.request(\"GET\", \"/gateway\")[\"url\"] ws = await client.ws_connect(url + \"&encoding=json\") self = cls(client, ws) return", "{ \"op\": 2, \"d\": { \"token\": <PASSWORD>.token, \"intents\": 513, \"properties\": { \"$os\": sys.platform,", "= self.ws.closed @classmethod async def start_gateway(cls, client): url = await client.request(\"GET\", \"/gateway\")[\"url\"] ws", "\"/gateway\")[\"url\"] ws = await client.ws_connect(url + \"&encoding=json\") self = cls(client, ws) return self", "ws = await client.ws_connect(url + \"&encoding=json\") self = cls(client, ws) return self async", "/ 1000.0 self.keepalive = KeepAlive(ws = self, interval = self.interval) await self.send(self.keepalive.get_data()) self.keepalive.start()", "in self.ws: if msg.type is aiohttp.WSMsgType.TEXT: await self.event_catch(msg) elif msg.type is aiohttp.WSMsgType.ERROR: raise", "await self.ws.send_json(data) async def catch_message(self): async for msg in self.ws: if msg.type is", "import sys from ..gateway import KeepAlive class DiscordGateway: def __init__(self, client, ws): self.ws", "is aiohttp.WSMsgType.ERROR: raise msg.data async def event_catch(self, msg): data = msg.json() if data[\"op\"]", "\"token\": <PASSWORD>.token, \"intents\": 513, \"properties\": { \"$os\": sys.platform, \"$browser\": \"discord-api.py\", \"$device\": \"discord-api.py\" }", "async def login(self): payload = { \"op\": 2, \"d\": { \"token\": <PASSWORD>.token, \"intents\":", "raise msg.data async def event_catch(self, msg): data = msg.json() if data[\"op\"] != 0:", "payload = { \"op\": 2, \"d\": { \"token\": <PASSWORD>.token, \"intents\": 513, \"properties\": {", "\"intents\": 513, \"properties\": { \"$os\": sys.platform, \"$browser\": \"discord-api.py\", \"$device\": \"discord-api.py\" } } }", "self.ws: if msg.type is aiohttp.WSMsgType.TEXT: await self.event_catch(msg) elif msg.type is aiohttp.WSMsgType.ERROR: raise 
msg.data", "client.ws_connect(url + \"&encoding=json\") self = cls(client, ws) return self async def login(self): payload", "\"&encoding=json\") self = cls(client, ws) return self async def login(self): payload = {", "cls(client, ws) return self async def login(self): payload = { \"op\": 2, \"d\":", "event_catch(self, msg): data = msg.json() if data[\"op\"] != 0: if data[\"op\"] == 10:", "<PASSWORD>.token, \"intents\": 513, \"properties\": { \"$os\": sys.platform, \"$browser\": \"discord-api.py\", \"$device\": \"discord-api.py\" } }", "513, \"properties\": { \"$os\": sys.platform, \"$browser\": \"discord-api.py\", \"$device\": \"discord-api.py\" } } } await", "import KeepAlive class DiscordGateway: def __init__(self, client, ws): self.ws = ws self.token =", "\"d\": { \"token\": <PASSWORD>.token, \"intents\": 513, \"properties\": { \"$os\": sys.platform, \"$browser\": \"discord-api.py\", \"$device\":", "from ..gateway import KeepAlive class DiscordGateway: def __init__(self, client, ws): self.ws = ws", "ws): self.ws = ws self.token = client.token self.client = client self.closed = self.ws.closed", "= await client.request(\"GET\", \"/gateway\")[\"url\"] ws = await client.ws_connect(url + \"&encoding=json\") self = cls(client,", "\"op\": 2, \"d\": { \"token\": <PASSWORD>.token, \"intents\": 513, \"properties\": { \"$os\": sys.platform, \"$browser\":", "== 10: self.interval = data[\"d\"]['heartbeat_interval'] / 1000.0 self.keepalive = KeepAlive(ws = self, interval", "@classmethod async def start_gateway(cls, client): url = await client.request(\"GET\", \"/gateway\")[\"url\"] ws = await", "self.ws.closed @classmethod async def start_gateway(cls, client): url = await client.request(\"GET\", \"/gateway\")[\"url\"] ws =", "0: if data[\"op\"] == 10: self.interval = data[\"d\"]['heartbeat_interval'] / 1000.0 self.keepalive = KeepAlive(ws", "= client.token self.client = client self.closed = self.ws.closed @classmethod async def start_gateway(cls, client):", "async def event_catch(self, msg): data = msg.json() if data[\"op\"] != 0: if data[\"op\"]", "KeepAlive class DiscordGateway: def __init__(self, client, ws): self.ws = ws self.token = client.token", "def event_catch(self, msg): data = msg.json() if data[\"op\"] != 0: if data[\"op\"] ==", "msg in self.ws: if msg.type is aiohttp.WSMsgType.TEXT: await self.event_catch(msg) elif msg.type is aiohttp.WSMsgType.ERROR:", "self.event_catch(msg) elif msg.type is aiohttp.WSMsgType.ERROR: raise msg.data async def event_catch(self, msg): data =", "await self.send(payload) async def send(self, data:dict): await self.ws.send_json(data) async def catch_message(self): async for", "\"$browser\": \"discord-api.py\", \"$device\": \"discord-api.py\" } } } await self.send(payload) async def send(self, data:dict):", "self.closed = self.ws.closed @classmethod async def start_gateway(cls, client): url = await client.request(\"GET\", \"/gateway\")[\"url\"]", "data[\"op\"] != 0: if data[\"op\"] == 10: self.interval = data[\"d\"]['heartbeat_interval'] / 1000.0 self.keepalive", "async def start_gateway(cls, client): url = await client.request(\"GET\", \"/gateway\")[\"url\"] ws = await client.ws_connect(url", "return self async def login(self): payload = { \"op\": 2, \"d\": { \"token\":", "sys from ..gateway import KeepAlive class DiscordGateway: def __init__(self, client, ws): self.ws =", "= msg.json() if data[\"op\"] != 0: if data[\"op\"] == 10: self.interval = data[\"d\"]['heartbeat_interval']", "def __init__(self, client, ws): self.ws = ws self.token = client.token 
self.client = client", "ws self.token = client.token self.client = client self.closed = self.ws.closed @classmethod async def", "{ \"token\": <PASSWORD>.token, \"intents\": 513, \"properties\": { \"$os\": sys.platform, \"$browser\": \"discord-api.py\", \"$device\": \"discord-api.py\"", "self async def login(self): payload = { \"op\": 2, \"d\": { \"token\": <PASSWORD>.token,", "!= 0: if data[\"op\"] == 10: self.interval = data[\"d\"]['heartbeat_interval'] / 1000.0 self.keepalive =", "self.keepalive = KeepAlive(ws = self, interval = self.interval) await self.send(self.keepalive.get_data()) self.keepalive.start() await self.login()", "= { \"op\": 2, \"d\": { \"token\": <PASSWORD>.token, \"intents\": 513, \"properties\": { \"$os\":", "msg): data = msg.json() if data[\"op\"] != 0: if data[\"op\"] == 10: self.interval" ]
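
KeepAlive comes from the package's own gateway module and is not part of this extract. A hypothetical sketch that is merely consistent with how it is called above (the ws and interval arguments, get_data(), start()) might look like this; the real class is likely different:

import asyncio


class KeepAlive:
    """Hypothetical heartbeat helper: Discord expects opcode 1 roughly every `interval` seconds."""

    def __init__(self, ws, interval):
        self.ws = ws              # the DiscordGateway instance
        self.interval = interval  # seconds, from the HELLO payload

    def get_data(self):
        # heartbeat payload: {"op": 1, "d": <last sequence number or null>}
        return {"op": 1, "d": getattr(self.ws, "sequence", None)}

    def start(self):
        asyncio.ensure_future(self._beat())

    async def _beat(self):
        while not self.ws.closed:
            await asyncio.sleep(self.interval)
            await self.ws.send(self.get_data())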
[ "import authenticate, login, logout from django.views.generic import View, ListView, DetailView,TemplateView from django.urls import", "res = {'code': 1, 'errmsg': '添加用户失败'} else: # 获取自定义的表单错误的两种常用方式 # print(_userForm.errors) print(_userForm.errors.as_json()) #", "class UserDetailView(LoginRequiredMixin, DetailView): model = UserProfile template_name = 'user/user_edit.html' context_object_name = 'user' def", "res = {'code': 0, \"next_url\": reverse(\"users:user_list\"), 'result': '更新用户成功'} except: res = {'code': 1,", "= UserProfile template_name = 'user/user_list.html' context_object_name = 'userlist' paginate_by = 2 keyword =", "{'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': '更新用户失败'} else: # 获取所有的表单错误 print(_userForm.errors) res = {'code':", "UserUpdateForm from django.contrib.auth.models import Group, Permission class UserView(LoginRequiredMixin, PaginationMixin, ListView): \"\"\" 组功能 \"\"\"", "**kwargs): context = super(UserView, self).get_context_data(**kwargs) context['keyword'] = self.keyword return context def post(self, request):", "import View, ListView, DetailView,TemplateView from django.urls import reverse from django.http import HttpResponseRedirect,HttpRequest, HttpResponse,", "import LoginRequiredMixin, PermissionRequiredMixin from pure_pagination.mixins import PaginationMixin from django.db.models import Q from django.contrib.auth.hashers", "= True data = _userForm.cleaned_data self.model.objects.create(**data) res = {'code': 0, 'result': '添加用户成功'} except:", "safe=True) def delete(self, request): data = QueryDict(request.body).dict() print(data) pk = data.get('id') try: if", "return JsonResponse(res,safe=True) class UserDetailView(LoginRequiredMixin, DetailView): model = UserProfile template_name = 'user/user_edit.html' context_object_name =", "ListView): \"\"\" 组功能 \"\"\" model = UserProfile template_name = 'user/user_list.html' context_object_name = 'userlist'", "= self.request.GET.get(\"keyword\", \"\").strip() if self.keyword: queryset = queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword)) return queryset def get_context_data(self,", "0, \"next_url\": reverse(\"users:user_list\"), 'result': '更新用户成功'} except: res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg':", "ListView, DetailView,TemplateView from django.urls import reverse from django.http import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict,", "django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin from pure_pagination.mixins import PaginationMixin from django.db.models import Q from", "= queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword)) return queryset def get_context_data(self, **kwargs): context = super(UserView, self).get_context_data(**kwargs) context['keyword']", "def delete(self, request): data = QueryDict(request.body).dict() print(data) pk = data.get('id') try: if pk", "'bb', 'phone': '13305779168'} _userForm = UserUpdateForm(request.POST) if _userForm.is_valid(): try: self.model.objects.filter(pk=pk).update(**data) res = {'code':", "reverse(\"users:user_list\"), 'result': '更新用户成功'} except: res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': '更新用户失败'} else:", "res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': '更新用户失败'} else: # 获取所有的表单错误 print(_userForm.errors) res", "django.contrib.auth.models import Group, Permission class UserView(LoginRequiredMixin, PaginationMixin, ListView): \"\"\" 组功能 \"\"\" 
model =", "# {'id': '7', 'username': 'aa', 'name_cn': 'bb', 'phone': '13305779168'} _userForm = UserUpdateForm(request.POST) if", "DetailView,TemplateView from django.urls import reverse from django.http import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404", "UserProfile template_name = 'user/user_list.html' context_object_name = 'userlist' paginate_by = 2 keyword = ''", "= data.get('id') try: if pk == 1: res = {'code': 1, 'result': '不能删除管理员'}", "self.keyword = self.request.GET.get(\"keyword\", \"\").strip() if self.keyword: queryset = queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword)) return queryset def", "= {'code': 1, 'result': '不能删除管理员'} else: user = self.model.objects.filter(pk=pk) user.delete() res = {'code':0,'result':'删除用户成功'}", "context_object_name = 'user' def post(self, request, **kwargs): print( request.POST) # <QueryDict: {'id': ['7'],", "def get_queryset(self): queryset = super(UserView, self).get_queryset() self.keyword = self.request.GET.get(\"keyword\", \"\").strip() if self.keyword: queryset", "= QueryDict(request.body).dict() print(data) # {'id': '7', 'username': 'aa', 'name_cn': 'bb', 'phone': '13305779168'} _userForm", "except: res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': '更新用户失败'} else: # 获取所有的表单错误 print(_userForm.errors)", "_userForm.is_valid(): try: self.model.objects.filter(pk=pk).update(**data) res = {'code': 0, \"next_url\": reverse(\"users:user_list\"), 'result': '更新用户成功'} except: res", "logout from django.views.generic import View, ListView, DetailView,TemplateView from django.urls import reverse from django.http", "users.models import UserProfile from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin from pure_pagination.mixins import PaginationMixin from", "获取自定义的表单错误的两种常用方式 # print(_userForm.errors) print(_userForm.errors.as_json()) # print(_userForm.errors['phone'][0]) # 手机号码非法 # print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户 res", "1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': '更新用户失败'} else: # 获取所有的表单错误 print(_userForm.errors) res = {'code': 1,", "import reverse from django.http import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404 from django.conf import", "return context def post(self, request): _userForm = UserProfileForm(request.POST) if _userForm.is_valid(): try: _userForm.cleaned_data['password'] =", "= 'user/user_edit.html' context_object_name = 'user' def post(self, request, **kwargs): print( request.POST) # <QueryDict:", "data = _userForm.cleaned_data self.model.objects.create(**data) res = {'code': 0, 'result': '添加用户成功'} except: res =", "= super(UserView, self).get_context_data(**kwargs) context['keyword'] = self.keyword return context def post(self, request): _userForm =", "= {'code': 1, 'errmsg': _userForm.errors.as_json()} return JsonResponse(res, safe=True) def delete(self, request): data =", "request, **kwargs): print( request.POST) # <QueryDict: {'id': ['7'], 'username': ['aa'], 'name_cn': ['bb'], 'phone':", "b'id=7&username=aa&name_cn=bb&phone=13305779168' pk = kwargs.get(\"pk\") data = QueryDict(request.body).dict() print(data) # {'id': '7', 'username': 'aa',", "if self.keyword: queryset = queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword)) return queryset def get_context_data(self, **kwargs): context =", "self).get_queryset() self.keyword = self.request.GET.get(\"keyword\", \"\").strip() if self.keyword: queryset = 
queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword)) return queryset", "UserProfileForm(request.POST) if _userForm.is_valid(): try: _userForm.cleaned_data['password'] = make_password(\"<PASSWORD>\") _userForm.cleaned_data['is_active'] = True data = _userForm.cleaned_data", "user.delete() res = {'code':0,'result':'删除用户成功'} except: res = {'code':1, 'result':'删除用户失败'} return JsonResponse(res,safe=True) class UserDetailView(LoginRequiredMixin,", "make_password from users.forms import UserProfileForm, UserUpdateForm from django.contrib.auth.models import Group, Permission class UserView(LoginRequiredMixin,", "PaginationMixin, ListView): \"\"\" 组功能 \"\"\" model = UserProfile template_name = 'user/user_list.html' context_object_name =", "def post(self, request): _userForm = UserProfileForm(request.POST) if _userForm.is_valid(): try: _userForm.cleaned_data['password'] = make_password(\"<PASSWORD>\") _userForm.cleaned_data['is_active']", "'添加用户成功'} except: res = {'code': 1, 'errmsg': '添加用户失败'} else: # 获取自定义的表单错误的两种常用方式 # print(_userForm.errors)", "except: res = {'code': 1, 'errmsg': '添加用户失败'} else: # 获取自定义的表单错误的两种常用方式 # print(_userForm.errors) print(_userForm.errors.as_json())", "JsonResponse(res, safe=True) def delete(self, request): data = QueryDict(request.body).dict() print(data) pk = data.get('id') try:", "res = {'code': 1, 'result': '不能删除管理员'} else: user = self.model.objects.filter(pk=pk) user.delete() res =", "pure_pagination.mixins import PaginationMixin from django.db.models import Q from django.contrib.auth.hashers import make_password from users.forms", "try: if pk == 1: res = {'code': 1, 'result': '不能删除管理员'} else: user", "'' login_url = '/login' def get_queryset(self): queryset = super(UserView, self).get_queryset() self.keyword = self.request.GET.get(\"keyword\",", "= 'user' def post(self, request, **kwargs): print( request.POST) # <QueryDict: {'id': ['7'], 'username':", "from django.contrib.auth.hashers import make_password from django.contrib.auth import authenticate, login, logout from django.views.generic import", "'result': '不能删除管理员'} else: user = self.model.objects.filter(pk=pk) user.delete() res = {'code':0,'result':'删除用户成功'} except: res =", "else: # 获取自定义的表单错误的两种常用方式 # print(_userForm.errors) print(_userForm.errors.as_json()) # print(_userForm.errors['phone'][0]) # 手机号码非法 # print(_userForm.errors['username'][0]) #", "= super(UserView, self).get_queryset() self.keyword = self.request.GET.get(\"keyword\", \"\").strip() if self.keyword: queryset = queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword))", "已存在一位使用该名字的用户 res = {'code': 1, 'errmsg': _userForm.errors.as_json()} return JsonResponse(res, safe=True) def delete(self, request):", "= '' login_url = '/login' def get_queryset(self): queryset = super(UserView, self).get_queryset() self.keyword =", "PermissionRequiredMixin from pure_pagination.mixins import PaginationMixin from django.db.models import Q from django.contrib.auth.hashers import make_password", "request.POST) # <QueryDict: {'id': ['7'], 'username': ['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}> print(kwargs) #", "'7'} print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168' pk = kwargs.get(\"pk\") data = QueryDict(request.body).dict() print(data) # {'id':", "= {'code': 1, 'errmsg': '添加用户失败'} else: # 获取自定义的表单错误的两种常用方式 # print(_userForm.errors) print(_userForm.errors.as_json()) # print(_userForm.errors['phone'][0])", "_userForm = UserUpdateForm(request.POST) if 
_userForm.is_valid(): try: self.model.objects.filter(pk=pk).update(**data) res = {'code': 0, \"next_url\": reverse(\"users:user_list\"),", "'aa', 'name_cn': 'bb', 'phone': '13305779168'} _userForm = UserUpdateForm(request.POST) if _userForm.is_valid(): try: self.model.objects.filter(pk=pk).update(**data) res", "'13305779168'} _userForm = UserUpdateForm(request.POST) if _userForm.is_valid(): try: self.model.objects.filter(pk=pk).update(**data) res = {'code': 0, \"next_url\":", "import make_password from django.contrib.auth import authenticate, login, logout from django.views.generic import View, ListView,", "if _userForm.is_valid(): try: _userForm.cleaned_data['password'] = make_password(\"<PASSWORD>\") _userForm.cleaned_data['is_active'] = True data = _userForm.cleaned_data self.model.objects.create(**data)", "data = QueryDict(request.body).dict() print(data) # {'id': '7', 'username': 'aa', 'name_cn': 'bb', 'phone': '13305779168'}", "= self.model.objects.filter(pk=pk) user.delete() res = {'code':0,'result':'删除用户成功'} except: res = {'code':1, 'result':'删除用户失败'} return JsonResponse(res,safe=True)", "True data = _userForm.cleaned_data self.model.objects.create(**data) res = {'code': 0, 'result': '添加用户成功'} except: res", "context_object_name = 'userlist' paginate_by = 2 keyword = '' login_url = '/login' def", "class UserView(LoginRequiredMixin, PaginationMixin, ListView): \"\"\" 组功能 \"\"\" model = UserProfile template_name = 'user/user_list.html'", "= 2 keyword = '' login_url = '/login' def get_queryset(self): queryset = super(UserView,", "post(self, request): _userForm = UserProfileForm(request.POST) if _userForm.is_valid(): try: _userForm.cleaned_data['password'] = make_password(\"<PASSWORD>\") _userForm.cleaned_data['is_active'] =", "pk = data.get('id') try: if pk == 1: res = {'code': 1, 'result':", "'不能删除管理员'} else: user = self.model.objects.filter(pk=pk) user.delete() res = {'code':0,'result':'删除用户成功'} except: res = {'code':1,", "_userForm = UserProfileForm(request.POST) if _userForm.is_valid(): try: _userForm.cleaned_data['password'] = make_password(\"<PASSWORD>\") _userForm.cleaned_data['is_active'] = True data", "= {'code':0,'result':'删除用户成功'} except: res = {'code':1, 'result':'删除用户失败'} return JsonResponse(res,safe=True) class UserDetailView(LoginRequiredMixin, DetailView): model", "res = {'code':0,'result':'删除用户成功'} except: res = {'code':1, 'result':'删除用户失败'} return JsonResponse(res,safe=True) class UserDetailView(LoginRequiredMixin, DetailView):", "context = super(UserView, self).get_context_data(**kwargs) context['keyword'] = self.keyword return context def post(self, request): _userForm", "'result': '更新用户成功'} except: res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': '更新用户失败'} else: #", "django.db.models import Q from django.contrib.auth.hashers import make_password from users.forms import UserProfileForm, UserUpdateForm from", "= kwargs.get(\"pk\") data = QueryDict(request.body).dict() print(data) # {'id': '7', 'username': 'aa', 'name_cn': 'bb',", "django.urls import reverse from django.http import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404 from django.conf", "from users.forms import UserProfileForm, UserUpdateForm from django.contrib.auth.models import Group, Permission class UserView(LoginRequiredMixin, PaginationMixin,", "model = UserProfile template_name = 'user/user_edit.html' context_object_name = 'user' def post(self, request, **kwargs):", "from django.contrib.auth.models import Group, Permission class 
UserView(LoginRequiredMixin, PaginationMixin, ListView): \"\"\" 组功能 \"\"\" model", "template_name = 'user/user_list.html' context_object_name = 'userlist' paginate_by = 2 keyword = '' login_url", "= {'code': 0, 'result': '添加用户成功'} except: res = {'code': 1, 'errmsg': '添加用户失败'} else:", "# 已存在一位使用该名字的用户 res = {'code': 1, 'errmsg': _userForm.errors.as_json()} return JsonResponse(res, safe=True) def delete(self,", "print( request.POST) # <QueryDict: {'id': ['7'], 'username': ['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}> print(kwargs)", "if _userForm.is_valid(): try: self.model.objects.filter(pk=pk).update(**data) res = {'code': 0, \"next_url\": reverse(\"users:user_list\"), 'result': '更新用户成功'} except:", "= {'code': 0, \"next_url\": reverse(\"users:user_list\"), 'result': '更新用户成功'} except: res = {'code': 1, \"next_url\":", "= {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': '更新用户失败'} else: # 获取所有的表单错误 print(_userForm.errors) res =", "import make_password from users.forms import UserProfileForm, UserUpdateForm from django.contrib.auth.models import Group, Permission class", "_userForm.cleaned_data self.model.objects.create(**data) res = {'code': 0, 'result': '添加用户成功'} except: res = {'code': 1,", "Group, Permission class UserView(LoginRequiredMixin, PaginationMixin, ListView): \"\"\" 组功能 \"\"\" model = UserProfile template_name", "'更新用户成功'} except: res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': '更新用户失败'} else: # 获取所有的表单错误", "DetailView): model = UserProfile template_name = 'user/user_edit.html' context_object_name = 'user' def post(self, request,", "queryset = super(UserView, self).get_queryset() self.keyword = self.request.GET.get(\"keyword\", \"\").strip() if self.keyword: queryset = queryset.filter(Q(name_cn__icontains=self.keyword)|", "django.shortcuts import render from django.contrib.auth.hashers import make_password from django.contrib.auth import authenticate, login, logout", "{'code': 0, 'result': '添加用户成功'} except: res = {'code': 1, 'errmsg': '添加用户失败'} else: #", "from django.urls import reverse from django.http import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404 from", "手机号码非法 # print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户 res = {'code': 1, 'errmsg': _userForm.errors.as_json()} return JsonResponse(res,", "super(UserView, self).get_context_data(**kwargs) context['keyword'] = self.keyword return context def post(self, request): _userForm = UserProfileForm(request.POST)", "= {'code':1, 'result':'删除用户失败'} return JsonResponse(res,safe=True) class UserDetailView(LoginRequiredMixin, DetailView): model = UserProfile template_name =", "from pure_pagination.mixins import PaginationMixin from django.db.models import Q from django.contrib.auth.hashers import make_password from", "_userForm.cleaned_data['password'] = make_password(\"<PASSWORD>\") _userForm.cleaned_data['is_active'] = True data = _userForm.cleaned_data self.model.objects.create(**data) res = {'code':", "_userForm.errors.as_json()} return JsonResponse(res, safe=True) def delete(self, request): data = QueryDict(request.body).dict() print(data) pk =", "['13305779168']}> print(kwargs) # {'pk': '7'} print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168' pk = kwargs.get(\"pk\") data =", "= 'userlist' paginate_by = 2 keyword = '' login_url = '/login' def get_queryset(self):", "try: self.model.objects.filter(pk=pk).update(**data) res = {'code': 0, \"next_url\": reverse(\"users:user_list\"), 'result': '更新用户成功'} except: res =", "1: res 
= {'code': 1, 'result': '不能删除管理员'} else: user = self.model.objects.filter(pk=pk) user.delete() res", "print(data) # {'id': '7', 'username': 'aa', 'name_cn': 'bb', 'phone': '13305779168'} _userForm = UserUpdateForm(request.POST)", "{'id': ['7'], 'username': ['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}> print(kwargs) # {'pk': '7'} print(request.body)", "= QueryDict(request.body).dict() print(data) pk = data.get('id') try: if pk == 1: res =", "= self.keyword return context def post(self, request): _userForm = UserProfileForm(request.POST) if _userForm.is_valid(): try:", "= make_password(\"<PASSWORD>\") _userForm.cleaned_data['is_active'] = True data = _userForm.cleaned_data self.model.objects.create(**data) res = {'code': 0,", "from django.contrib.auth import authenticate, login, logout from django.views.generic import View, ListView, DetailView,TemplateView from", "self.keyword return context def post(self, request): _userForm = UserProfileForm(request.POST) if _userForm.is_valid(): try: _userForm.cleaned_data['password']", "from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin from pure_pagination.mixins import PaginationMixin from django.db.models import Q", "\"\"\" model = UserProfile template_name = 'user/user_list.html' context_object_name = 'userlist' paginate_by = 2", "= UserUpdateForm(request.POST) if _userForm.is_valid(): try: self.model.objects.filter(pk=pk).update(**data) res = {'code': 0, \"next_url\": reverse(\"users:user_list\"), 'result':", "**kwargs): print( request.POST) # <QueryDict: {'id': ['7'], 'username': ['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}>", "'userlist' paginate_by = 2 keyword = '' login_url = '/login' def get_queryset(self): queryset", "print(_userForm.errors) res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': _userForm.errors} return render(request, settings.JUMP_PAGE, res)", "django.http import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404 from django.conf import settings from users.models", "from django.http import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404 from django.conf import settings from", "kwargs.get(\"pk\") data = QueryDict(request.body).dict() print(data) # {'id': '7', 'username': 'aa', 'name_cn': 'bb', 'phone':", "QueryDict(request.body).dict() print(data) pk = data.get('id') try: if pk == 1: res = {'code':", "delete(self, request): data = QueryDict(request.body).dict() print(data) pk = data.get('id') try: if pk ==", "get_queryset(self): queryset = super(UserView, self).get_queryset() self.keyword = self.request.GET.get(\"keyword\", \"\").strip() if self.keyword: queryset =", "JsonResponse, QueryDict, Http404 from django.conf import settings from users.models import UserProfile from django.contrib.auth.mixins", "data = QueryDict(request.body).dict() print(data) pk = data.get('id') try: if pk == 1: res", "Http404 from django.conf import settings from users.models import UserProfile from django.contrib.auth.mixins import LoginRequiredMixin,", "{'pk': '7'} print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168' pk = kwargs.get(\"pk\") data = QueryDict(request.body).dict() print(data) #", "from django.db.models import Q from django.contrib.auth.hashers import make_password from users.forms import UserProfileForm, UserUpdateForm", "组功能 \"\"\" model = UserProfile template_name = 'user/user_list.html' context_object_name = 'userlist' paginate_by =", "self.keyword: queryset = 
queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword)) return queryset def get_context_data(self, **kwargs): context = super(UserView,", "_userForm.cleaned_data['is_active'] = True data = _userForm.cleaned_data self.model.objects.create(**data) res = {'code': 0, 'result': '添加用户成功'}", "users.forms import UserProfileForm, UserUpdateForm from django.contrib.auth.models import Group, Permission class UserView(LoginRequiredMixin, PaginationMixin, ListView):", "super(UserView, self).get_queryset() self.keyword = self.request.GET.get(\"keyword\", \"\").strip() if self.keyword: queryset = queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword)) return", "template_name = 'user/user_edit.html' context_object_name = 'user' def post(self, request, **kwargs): print( request.POST) #", "= 'user/user_list.html' context_object_name = 'userlist' paginate_by = 2 keyword = '' login_url =", "data.get('id') try: if pk == 1: res = {'code': 1, 'result': '不能删除管理员'} else:", "'name_cn': ['bb'], 'phone': ['13305779168']}> print(kwargs) # {'pk': '7'} print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168' pk =", "login, logout from django.views.generic import View, ListView, DetailView,TemplateView from django.urls import reverse from", "django.contrib.auth import authenticate, login, logout from django.views.generic import View, ListView, DetailView,TemplateView from django.urls", "print(_userForm.errors['phone'][0]) # 手机号码非法 # print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户 res = {'code': 1, 'errmsg': _userForm.errors.as_json()}", "'username': 'aa', 'name_cn': 'bb', 'phone': '13305779168'} _userForm = UserUpdateForm(request.POST) if _userForm.is_valid(): try: self.model.objects.filter(pk=pk).update(**data)", "django.contrib.auth.hashers import make_password from users.forms import UserProfileForm, UserUpdateForm from django.contrib.auth.models import Group, Permission", "UserDetailView(LoginRequiredMixin, DetailView): model = UserProfile template_name = 'user/user_edit.html' context_object_name = 'user' def post(self,", "2 keyword = '' login_url = '/login' def get_queryset(self): queryset = super(UserView, self).get_queryset()", "_userForm.is_valid(): try: _userForm.cleaned_data['password'] = make_password(\"<PASSWORD>\") _userForm.cleaned_data['is_active'] = True data = _userForm.cleaned_data self.model.objects.create(**data) res", "{'code': 1, 'errmsg': '添加用户失败'} else: # 获取自定义的表单错误的两种常用方式 # print(_userForm.errors) print(_userForm.errors.as_json()) # print(_userForm.errors['phone'][0]) #", "res = {'code': 1, 'errmsg': _userForm.errors.as_json()} return JsonResponse(res, safe=True) def delete(self, request): data", "Q from django.contrib.auth.hashers import make_password from users.forms import UserProfileForm, UserUpdateForm from django.contrib.auth.models import", "print(_userForm.errors) print(_userForm.errors.as_json()) # print(_userForm.errors['phone'][0]) # 手机号码非法 # print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户 res = {'code':", "queryset = queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword)) return queryset def get_context_data(self, **kwargs): context = super(UserView, self).get_context_data(**kwargs)", "except: res = {'code':1, 'result':'删除用户失败'} return JsonResponse(res,safe=True) class UserDetailView(LoginRequiredMixin, DetailView): model = UserProfile", "\"next_url\": reverse(\"users:user_list\"), 'errmsg': '更新用户失败'} else: # 获取所有的表单错误 print(_userForm.errors) res = {'code': 1, 
\"next_url\":", "from django.views.generic import View, ListView, DetailView,TemplateView from django.urls import reverse from django.http import", "settings from users.models import UserProfile from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin from pure_pagination.mixins import", "['7'], 'username': ['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}> print(kwargs) # {'pk': '7'} print(request.body) #", "HttpResponse, JsonResponse, QueryDict, Http404 from django.conf import settings from users.models import UserProfile from", "{'code': 1, 'errmsg': _userForm.errors.as_json()} return JsonResponse(res, safe=True) def delete(self, request): data = QueryDict(request.body).dict()", "user = self.model.objects.filter(pk=pk) user.delete() res = {'code':0,'result':'删除用户成功'} except: res = {'code':1, 'result':'删除用户失败'} return", "'errmsg': _userForm.errors.as_json()} return JsonResponse(res, safe=True) def delete(self, request): data = QueryDict(request.body).dict() print(data) pk", "pk = kwargs.get(\"pk\") data = QueryDict(request.body).dict() print(data) # {'id': '7', 'username': 'aa', 'name_cn':", "def post(self, request, **kwargs): print( request.POST) # <QueryDict: {'id': ['7'], 'username': ['aa'], 'name_cn':", "import PaginationMixin from django.db.models import Q from django.contrib.auth.hashers import make_password from users.forms import", "0, 'result': '添加用户成功'} except: res = {'code': 1, 'errmsg': '添加用户失败'} else: # 获取自定义的表单错误的两种常用方式", "def get_context_data(self, **kwargs): context = super(UserView, self).get_context_data(**kwargs) context['keyword'] = self.keyword return context def", "'errmsg': '更新用户失败'} else: # 获取所有的表单错误 print(_userForm.errors) res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg':", "res = {'code':1, 'result':'删除用户失败'} return JsonResponse(res,safe=True) class UserDetailView(LoginRequiredMixin, DetailView): model = UserProfile template_name", "Q(username__icontains=self.keyword)) return queryset def get_context_data(self, **kwargs): context = super(UserView, self).get_context_data(**kwargs) context['keyword'] = self.keyword", "# <QueryDict: {'id': ['7'], 'username': ['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}> print(kwargs) # {'pk':", "# 获取所有的表单错误 print(_userForm.errors) res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': _userForm.errors} return render(request,", "1, 'errmsg': '添加用户失败'} else: # 获取自定义的表单错误的两种常用方式 # print(_userForm.errors) print(_userForm.errors.as_json()) # print(_userForm.errors['phone'][0]) # 手机号码非法", "'7', 'username': 'aa', 'name_cn': 'bb', 'phone': '13305779168'} _userForm = UserUpdateForm(request.POST) if _userForm.is_valid(): try:", "1, 'result': '不能删除管理员'} else: user = self.model.objects.filter(pk=pk) user.delete() res = {'code':0,'result':'删除用户成功'} except: res", "self.request.GET.get(\"keyword\", \"\").strip() if self.keyword: queryset = queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword)) return queryset def get_context_data(self, **kwargs):", "context['keyword'] = self.keyword return context def post(self, request): _userForm = UserProfileForm(request.POST) if _userForm.is_valid():", "'user' def post(self, request, **kwargs): print( request.POST) # <QueryDict: {'id': ['7'], 'username': ['aa'],", "'添加用户失败'} else: # 获取自定义的表单错误的两种常用方式 # print(_userForm.errors) print(_userForm.errors.as_json()) # print(_userForm.errors['phone'][0]) # 手机号码非法 # print(_userForm.errors['username'][0])", "return queryset def get_context_data(self, **kwargs): 
context = super(UserView, self).get_context_data(**kwargs) context['keyword'] = self.keyword return", "1, 'errmsg': _userForm.errors.as_json()} return JsonResponse(res, safe=True) def delete(self, request): data = QueryDict(request.body).dict() print(data)", "UserProfileForm, UserUpdateForm from django.contrib.auth.models import Group, Permission class UserView(LoginRequiredMixin, PaginationMixin, ListView): \"\"\" 组功能", "self.model.objects.filter(pk=pk) user.delete() res = {'code':0,'result':'删除用户成功'} except: res = {'code':1, 'result':'删除用户失败'} return JsonResponse(res,safe=True) class", "'result': '添加用户成功'} except: res = {'code': 1, 'errmsg': '添加用户失败'} else: # 获取自定义的表单错误的两种常用方式 #", "['bb'], 'phone': ['13305779168']}> print(kwargs) # {'pk': '7'} print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168' pk = kwargs.get(\"pk\")", "reverse from django.http import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404 from django.conf import settings", "View, ListView, DetailView,TemplateView from django.urls import reverse from django.http import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse,", "import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404 from django.conf import settings from users.models import", "paginate_by = 2 keyword = '' login_url = '/login' def get_queryset(self): queryset =", "{'code': 1, 'result': '不能删除管理员'} else: user = self.model.objects.filter(pk=pk) user.delete() res = {'code':0,'result':'删除用户成功'} except:", "'phone': '13305779168'} _userForm = UserUpdateForm(request.POST) if _userForm.is_valid(): try: self.model.objects.filter(pk=pk).update(**data) res = {'code': 0,", "make_password from django.contrib.auth import authenticate, login, logout from django.views.generic import View, ListView, DetailView,TemplateView", "Permission class UserView(LoginRequiredMixin, PaginationMixin, ListView): \"\"\" 组功能 \"\"\" model = UserProfile template_name =", "'user/user_edit.html' context_object_name = 'user' def post(self, request, **kwargs): print( request.POST) # <QueryDict: {'id':", "UserView(LoginRequiredMixin, PaginationMixin, ListView): \"\"\" 组功能 \"\"\" model = UserProfile template_name = 'user/user_list.html' context_object_name", "print(_userForm.errors.as_json()) # print(_userForm.errors['phone'][0]) # 手机号码非法 # print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户 res = {'code': 1,", "login_url = '/login' def get_queryset(self): queryset = super(UserView, self).get_queryset() self.keyword = self.request.GET.get(\"keyword\", \"\").strip()", "'username': ['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}> print(kwargs) # {'pk': '7'} print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168'", "'user/user_list.html' context_object_name = 'userlist' paginate_by = 2 keyword = '' login_url = '/login'", "'result':'删除用户失败'} return JsonResponse(res,safe=True) class UserDetailView(LoginRequiredMixin, DetailView): model = UserProfile template_name = 'user/user_edit.html' context_object_name", "= UserProfileForm(request.POST) if _userForm.is_valid(): try: _userForm.cleaned_data['password'] = make_password(\"<PASSWORD>\") _userForm.cleaned_data['is_active'] = True data =", "QueryDict(request.body).dict() print(data) # {'id': '7', 'username': 'aa', 'name_cn': 'bb', 'phone': '13305779168'} _userForm =", "django.conf import settings from users.models import UserProfile from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin from", "make_password(\"<PASSWORD>\") 
_userForm.cleaned_data['is_active'] = True data = _userForm.cleaned_data self.model.objects.create(**data) res = {'code': 0, 'result':", "else: user = self.model.objects.filter(pk=pk) user.delete() res = {'code':0,'result':'删除用户成功'} except: res = {'code':1, 'result':'删除用户失败'}", "render from django.contrib.auth.hashers import make_password from django.contrib.auth import authenticate, login, logout from django.views.generic", "self).get_context_data(**kwargs) context['keyword'] = self.keyword return context def post(self, request): _userForm = UserProfileForm(request.POST) if", "{'code':0,'result':'删除用户成功'} except: res = {'code':1, 'result':'删除用户失败'} return JsonResponse(res,safe=True) class UserDetailView(LoginRequiredMixin, DetailView): model =", "model = UserProfile template_name = 'user/user_list.html' context_object_name = 'userlist' paginate_by = 2 keyword", "{'id': '7', 'username': 'aa', 'name_cn': 'bb', 'phone': '13305779168'} _userForm = UserUpdateForm(request.POST) if _userForm.is_valid():", "= '/login' def get_queryset(self): queryset = super(UserView, self).get_queryset() self.keyword = self.request.GET.get(\"keyword\", \"\").strip() if", "from django.conf import settings from users.models import UserProfile from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin", "# print(_userForm.errors) print(_userForm.errors.as_json()) # print(_userForm.errors['phone'][0]) # 手机号码非法 # print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户 res =", "queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword)) return queryset def get_context_data(self, **kwargs): context = super(UserView, self).get_context_data(**kwargs) context['keyword'] =", "authenticate, login, logout from django.views.generic import View, ListView, DetailView,TemplateView from django.urls import reverse", "# 手机号码非法 # print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户 res = {'code': 1, 'errmsg': _userForm.errors.as_json()} return", "print(data) pk = data.get('id') try: if pk == 1: res = {'code': 1,", "# {'pk': '7'} print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168' pk = kwargs.get(\"pk\") data = QueryDict(request.body).dict() print(data)", "self.model.objects.filter(pk=pk).update(**data) res = {'code': 0, \"next_url\": reverse(\"users:user_list\"), 'result': '更新用户成功'} except: res = {'code':", "print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户 res = {'code': 1, 'errmsg': _userForm.errors.as_json()} return JsonResponse(res, safe=True) def", "\"next_url\": reverse(\"users:user_list\"), 'result': '更新用户成功'} except: res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': '更新用户失败'}", "import render from django.contrib.auth.hashers import make_password from django.contrib.auth import authenticate, login, logout from", "if pk == 1: res = {'code': 1, 'result': '不能删除管理员'} else: user =", "= UserProfile template_name = 'user/user_edit.html' context_object_name = 'user' def post(self, request, **kwargs): print(", "from django.shortcuts import render from django.contrib.auth.hashers import make_password from django.contrib.auth import authenticate, login,", "UserProfile template_name = 'user/user_edit.html' context_object_name = 'user' def post(self, request, **kwargs): print( request.POST)", "self.model.objects.create(**data) res = {'code': 0, 'result': '添加用户成功'} except: res = {'code': 1, 'errmsg':", "django.contrib.auth.hashers import make_password from django.contrib.auth import authenticate, login, logout from django.views.generic 
import View,", "\"\").strip() if self.keyword: queryset = queryset.filter(Q(name_cn__icontains=self.keyword)| Q(username__icontains=self.keyword)) return queryset def get_context_data(self, **kwargs): context", "['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}> print(kwargs) # {'pk': '7'} print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168' pk", "# b'id=7&username=aa&name_cn=bb&phone=13305779168' pk = kwargs.get(\"pk\") data = QueryDict(request.body).dict() print(data) # {'id': '7', 'username':", "# print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户 res = {'code': 1, 'errmsg': _userForm.errors.as_json()} return JsonResponse(res, safe=True)", "from django.contrib.auth.hashers import make_password from users.forms import UserProfileForm, UserUpdateForm from django.contrib.auth.models import Group,", "获取所有的表单错误 print(_userForm.errors) res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': _userForm.errors} return render(request, settings.JUMP_PAGE,", "UserProfile from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin from pure_pagination.mixins import PaginationMixin from django.db.models import", "reverse(\"users:user_list\"), 'errmsg': '更新用户失败'} else: # 获取所有的表单错误 print(_userForm.errors) res = {'code': 1, \"next_url\": reverse(\"users:user_list\"),", "from users.models import UserProfile from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin from pure_pagination.mixins import PaginationMixin", "request): _userForm = UserProfileForm(request.POST) if _userForm.is_valid(): try: _userForm.cleaned_data['password'] = make_password(\"<PASSWORD>\") _userForm.cleaned_data['is_active'] = True", "try: _userForm.cleaned_data['password'] = make_password(\"<PASSWORD>\") _userForm.cleaned_data['is_active'] = True data = _userForm.cleaned_data self.model.objects.create(**data) res =", "print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168' pk = kwargs.get(\"pk\") data = QueryDict(request.body).dict() print(data) # {'id': '7',", "# print(_userForm.errors['phone'][0]) # 手机号码非法 # print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户 res = {'code': 1, 'errmsg':", "print(kwargs) # {'pk': '7'} print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168' pk = kwargs.get(\"pk\") data = QueryDict(request.body).dict()", "import settings from users.models import UserProfile from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin from pure_pagination.mixins", "request): data = QueryDict(request.body).dict() print(data) pk = data.get('id') try: if pk == 1:", "\"\"\" 组功能 \"\"\" model = UserProfile template_name = 'user/user_list.html' context_object_name = 'userlist' paginate_by", "HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404 from django.conf import settings from users.models import UserProfile", "'phone': ['13305779168']}> print(kwargs) # {'pk': '7'} print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168' pk = kwargs.get(\"pk\") data", "<QueryDict: {'id': ['7'], 'username': ['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}> print(kwargs) # {'pk': '7'}", "import UserProfileForm, UserUpdateForm from django.contrib.auth.models import Group, Permission class UserView(LoginRequiredMixin, PaginationMixin, ListView): \"\"\"", "queryset def get_context_data(self, **kwargs): context = super(UserView, self).get_context_data(**kwargs) context['keyword'] = self.keyword return context", "res = {'code': 0, 'result': '添加用户成功'} 
except: res = {'code': 1, 'errmsg': '添加用户失败'}", "'更新用户失败'} else: # 获取所有的表单错误 print(_userForm.errors) res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': _userForm.errors}", "else: # 获取所有的表单错误 print(_userForm.errors) res = {'code': 1, \"next_url\": reverse(\"users:user_list\"), 'errmsg': _userForm.errors} return", "UserUpdateForm(request.POST) if _userForm.is_valid(): try: self.model.objects.filter(pk=pk).update(**data) res = {'code': 0, \"next_url\": reverse(\"users:user_list\"), 'result': '更新用户成功'}", "JsonResponse(res,safe=True) class UserDetailView(LoginRequiredMixin, DetailView): model = UserProfile template_name = 'user/user_edit.html' context_object_name = 'user'", "== 1: res = {'code': 1, 'result': '不能删除管理员'} else: user = self.model.objects.filter(pk=pk) user.delete()", "import Group, Permission class UserView(LoginRequiredMixin, PaginationMixin, ListView): \"\"\" 组功能 \"\"\" model = UserProfile", "LoginRequiredMixin, PermissionRequiredMixin from pure_pagination.mixins import PaginationMixin from django.db.models import Q from django.contrib.auth.hashers import", "PaginationMixin from django.db.models import Q from django.contrib.auth.hashers import make_password from users.forms import UserProfileForm,", "django.views.generic import View, ListView, DetailView,TemplateView from django.urls import reverse from django.http import HttpResponseRedirect,HttpRequest,", "import UserProfile from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin from pure_pagination.mixins import PaginationMixin from django.db.models", "'errmsg': '添加用户失败'} else: # 获取自定义的表单错误的两种常用方式 # print(_userForm.errors) print(_userForm.errors.as_json()) # print(_userForm.errors['phone'][0]) # 手机号码非法 #", "post(self, request, **kwargs): print( request.POST) # <QueryDict: {'id': ['7'], 'username': ['aa'], 'name_cn': ['bb'],", "pk == 1: res = {'code': 1, 'result': '不能删除管理员'} else: user = self.model.objects.filter(pk=pk)", "'name_cn': 'bb', 'phone': '13305779168'} _userForm = UserUpdateForm(request.POST) if _userForm.is_valid(): try: self.model.objects.filter(pk=pk).update(**data) res =", "keyword = '' login_url = '/login' def get_queryset(self): queryset = super(UserView, self).get_queryset() self.keyword", "context def post(self, request): _userForm = UserProfileForm(request.POST) if _userForm.is_valid(): try: _userForm.cleaned_data['password'] = make_password(\"<PASSWORD>\")", "'/login' def get_queryset(self): queryset = super(UserView, self).get_queryset() self.keyword = self.request.GET.get(\"keyword\", \"\").strip() if self.keyword:", "QueryDict, Http404 from django.conf import settings from users.models import UserProfile from django.contrib.auth.mixins import", "get_context_data(self, **kwargs): context = super(UserView, self).get_context_data(**kwargs) context['keyword'] = self.keyword return context def post(self,", "import Q from django.contrib.auth.hashers import make_password from users.forms import UserProfileForm, UserUpdateForm from django.contrib.auth.models", "# 获取自定义的表单错误的两种常用方式 # print(_userForm.errors) print(_userForm.errors.as_json()) # print(_userForm.errors['phone'][0]) # 手机号码非法 # print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户", "return JsonResponse(res, safe=True) def delete(self, request): data = QueryDict(request.body).dict() print(data) pk = data.get('id')", "{'code':1, 'result':'删除用户失败'} return JsonResponse(res,safe=True) class UserDetailView(LoginRequiredMixin, DetailView): model = UserProfile template_name = 'user/user_edit.html'", "= 
_userForm.cleaned_data self.model.objects.create(**data) res = {'code': 0, 'result': '添加用户成功'} except: res = {'code':", "{'code': 0, \"next_url\": reverse(\"users:user_list\"), 'result': '更新用户成功'} except: res = {'code': 1, \"next_url\": reverse(\"users:user_list\")," ]
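

# users/urls.py
# A minimal sketch of the URLconf the views above assume. This file is not in
# the original; the module and route names are inferred from
# reverse("users:user_list") and from the `pk` kwarg used by UserDetailView.post.
from django.urls import re_path

from users import views

app_name = "users"

urlpatterns = [
    # UserView dispatches GET (list/search), POST (create) and DELETE (remove)
    re_path(r"^user_list/$", views.UserView.as_view(), name="user_list"),
    # `pk` is captured as a string here, matching the {'pk': '7'} debug output above
    re_path(r"^user_edit/(?P<pk>\d+)/$", views.UserDetailView.as_view(), name="user_edit"),
]

# Hypothetical client-side check of UserView.delete. Because that view parses a
# form-encoded body via QueryDict(request.body), the id must travel in the
# request body, not the query string. Host and route are assumptions, and CSRF
# handling is omitted, so treat this as illustrative only:
#
#   import requests
#
#   session = requests.Session()
#   # ...log in first so that LoginRequiredMixin admits the request...
#   resp = session.delete(
#       "http://localhost:8000/users/user_list/",  # assumed route to UserView
#       data={"id": 7},  # form-encoded body, parsed by QueryDict(request.body)
#   )
#   print(resp.json())  # e.g. {'code': 0, 'result': 'User deleted successfully'}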
[ "urlpatterns = [ url(r'(?P<id>\\d+)/post_edit/$', views.post_edit, name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report, name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$', views.post_delete, name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$',", "views from django.conf import settings from django.conf.urls.static import static app_name=\"blog\" urlpatterns = [", "url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report, name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$', views.post_delete, name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete, name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail, name=\"post_detail\"), url(r'post_create/$',", "import static app_name=\"blog\" urlpatterns = [ url(r'(?P<id>\\d+)/post_edit/$', views.post_edit, name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report, name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$',", "django.conf.urls.static import static app_name=\"blog\" urlpatterns = [ url(r'(?P<id>\\d+)/post_edit/$', views.post_edit, name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report, name=\"report\"),", "url(r'(?P<id>\\d+)/post_delete/$', views.post_delete, name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete, name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail, name=\"post_detail\"), url(r'post_create/$', views.post_create, name=\"post_create\"), url(r'edit_profile/$',", "url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete, name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail, name=\"post_detail\"), url(r'post_create/$', views.post_create, name=\"post_create\"), url(r'edit_profile/$', views.edit_profile, name=\"edit_profile\"), ]", "import url from blog import views from django.conf import settings from django.conf.urls.static import", "views.post_delete, name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete, name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail, name=\"post_detail\"), url(r'post_create/$', views.post_create, name=\"post_create\"), url(r'edit_profile/$', views.edit_profile,", "name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$', views.post_delete, name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete, name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail, name=\"post_detail\"), url(r'post_create/$', views.post_create, name=\"post_create\"),", "from django.conf import settings from django.conf.urls.static import static app_name=\"blog\" urlpatterns = [ url(r'(?P<id>\\d+)/post_edit/$',", "views.report, name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$', views.post_delete, name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete, name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail, name=\"post_detail\"), url(r'post_create/$', views.post_create,", "name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report, name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$', views.post_delete, name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete, name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail, name=\"post_detail\"),", 
"name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail, name=\"post_detail\"), url(r'post_create/$', views.post_create, name=\"post_create\"), url(r'edit_profile/$', views.edit_profile, name=\"edit_profile\"), ] if settings.DEBUG:", "blog import views from django.conf import settings from django.conf.urls.static import static app_name=\"blog\" urlpatterns", "django.conf.urls import url from blog import views from django.conf import settings from django.conf.urls.static", "name=\"post_detail\"), url(r'post_create/$', views.post_create, name=\"post_create\"), url(r'edit_profile/$', views.edit_profile, name=\"edit_profile\"), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL,", "views.post_edit, name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report, name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$', views.post_delete, name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete, name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail,", "views.post_detail, name=\"post_detail\"), url(r'post_create/$', views.post_create, name=\"post_create\"), url(r'edit_profile/$', views.edit_profile, name=\"edit_profile\"), ] if settings.DEBUG: urlpatterns +=", "import settings from django.conf.urls.static import static app_name=\"blog\" urlpatterns = [ url(r'(?P<id>\\d+)/post_edit/$', views.post_edit, name=\"post_edit\"),", "from django.conf.urls import url from blog import views from django.conf import settings from", "app_name=\"blog\" urlpatterns = [ url(r'(?P<id>\\d+)/post_edit/$', views.post_edit, name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report, name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$', views.post_delete, name=\"post_delete\"),", "url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail, name=\"post_detail\"), url(r'post_create/$', views.post_create, name=\"post_create\"), url(r'edit_profile/$', views.edit_profile, name=\"edit_profile\"), ] if settings.DEBUG: urlpatterns", "url(r'(?P<id>\\d+)/post_edit/$', views.post_edit, name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report, name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$', views.post_delete, name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete, name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$',", "views.cmnt_delete, name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail, name=\"post_detail\"), url(r'post_create/$', views.post_create, name=\"post_create\"), url(r'edit_profile/$', views.edit_profile, name=\"edit_profile\"), ] if", "from blog import views from django.conf import settings from django.conf.urls.static import static app_name=\"blog\"", "= [ url(r'(?P<id>\\d+)/post_edit/$', views.post_edit, name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report, name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$', views.post_delete, name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete,", "url from blog import views from django.conf import settings from django.conf.urls.static import static", "settings from django.conf.urls.static import static app_name=\"blog\" urlpatterns = [ url(r'(?P<id>\\d+)/post_edit/$', views.post_edit, name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$',", 
"import views from django.conf import settings from django.conf.urls.static import static app_name=\"blog\" urlpatterns =", "django.conf import settings from django.conf.urls.static import static app_name=\"blog\" urlpatterns = [ url(r'(?P<id>\\d+)/post_edit/$', views.post_edit,", "name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete, name=\"cmnt_delete\"), url(r'(?P<id>\\d+)/(?P<slug>[\\w-]+)/$', views.post_detail, name=\"post_detail\"), url(r'post_create/$', views.post_create, name=\"post_create\"), url(r'edit_profile/$', views.edit_profile, name=\"edit_profile\"),", "[ url(r'(?P<id>\\d+)/post_edit/$', views.post_edit, name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report, name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$', views.post_delete, name=\"post_delete\"), url(r'(?P<id>\\d+)/cmnt_delete/$', views.cmnt_delete, name=\"cmnt_delete\"),", "static app_name=\"blog\" urlpatterns = [ url(r'(?P<id>\\d+)/post_edit/$', views.post_edit, name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report, name=\"report\"), url(r'(?P<id>\\d+)/post_delete/$', views.post_delete,", "from django.conf.urls.static import static app_name=\"blog\" urlpatterns = [ url(r'(?P<id>\\d+)/post_edit/$', views.post_edit, name=\"post_edit\"), url(r'(?P<reqid>\\d+)/(?P<proid>\\d+)/(?P<posid>\\d+)/(?P<comid>\\d+)/report/$', views.report,", "url(r'post_create/$', views.post_create, name=\"post_create\"), url(r'edit_profile/$', views.edit_profile, name=\"edit_profile\"), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)" ]
[ "# right, # respectively, making the sample size for this comparison quite large.", "Figure( (methods.width + density.width) * 0.9, (methods.height) * 0.9, methods_panel, density_panel, ) fig.save(FIG_PATH", "A2) ax = axs[0, 1] ax.text( 0.4, 0.2, r\"$p = \\frac{\\# \\ edges}{\\#", "pkg.plot import SmartSVG, networkplot_simple, set_theme from pkg.plot.er import plot_density from pkg.stats import erdos_renyi_test", "Thus, if the densities are different, it is likely that tests based on", "\"Left\", color=network_palette[\"Left\"], size=\"large\", rotation=0, ha=\"right\", labelpad=10, ) ax = axs[1, 0] networkplot_simple(A2, node_data,", "independent binomials are significantly different. This test yields a # p-value of {glue:text}`er_unmatched_test-pvalue:0.3g`,", "this estimated parameter $\\hat{p}$. The p-value for testing # the null hypothesis that", "pvalue, misc = erdos_renyi_test(left_adj, right_adj) glue(\"pvalue\", pvalue, form=\"pvalue\") #%% n_possible_left = misc[\"possible1\"] n_possible_right", "# The [**Erdos-Renyi (ER) model** # ](https://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93R%C3%A9nyi_model) # is one of the simplest", "matplotlib.collections import LineCollection from pkg.data import load_network_palette, load_node_palette, load_unmatched from pkg.io import FIG_PATH", "import sample_toy_networks from svgutils.compose import Figure, Panel, Text from pkg.plot import draw_hypothesis_box, rainbowarrow", "ax.set_title(label_text, pad=10) fig.set_facecolor(\"w\") ax = merge_axes(fig, axs, rows=1) soft_axis_off(ax) rainbowarrow(ax, (0.15, 0.5), (0.85,", "Fisher's exact test). # ``` #%% [markdown] # {numref}`Figure {number} <fig:er_unmatched_test-er_density>` shows the", "if not DISPLAY_FIGS: plt.close() def glue(name, var, **kwargs): default_glue(name, var, FILENAME, **kwargs) t0", "number of edges is the sum of # independent Bernoulli trials with the", "size=\"large\", rotation=0, ha=\"right\", labelpad=10, ) stat, pvalue, misc = erdos_renyi_test(A1, A2) ax =", "model #%% [markdown] # ```{glue:figure} fig:er_unmatched_test-er_density # :name: \"fig:er_unmatched_test-er_density\" # # Comparison of", "subgraphs. We see # that the density on the left is ~{glue:text}`er_unmatched_test-density_left:0.3f`, and", "= Figure( (methods.width + density.width) * 0.9, (methods.height) * 0.9, methods_panel, density_panel, )", "this model, the only parameter of interest is the global connection # probability,", "as many of the model-based # parameters we will consider in this paper,", "the right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`. To determine # whether this is a difference", "the left # hemisphere is ~{glue:text}`er_unmatched_test-density_left:0.3f`, while for the right # it is", "same. In # other words, all edges between any two nodes are equally", "edges is the sum of # independent Bernoulli trials with the same probability.", "# the left # hemisphere is ~{glue:text}`er_unmatched_test-density_left:0.3f`, while for the right # it", "in this paper, are strongly related to the network # density. Thus, if", "proportions is well studied. # In our case, we will use Fisher's Exact", "model treats # the probability of each potential edge in the network occuring", "# and simply compare their estimated densities. 
#%% [markdown] # ## The Erdos-Renyi", "the comparison of # the network densities between the left and right hemisphere", "fig = Figure( (methods.width + density.width) * 0.9, (methods.height) * 0.9, methods_panel, density_panel,", "the probability of the edge $(i, j)$ occuring is: # $$ P[A_{ij} =", "# clustering coefficient, number of triangles, etc), as well as many of the", "FIG_PATH from pkg.io import glue as default_glue from pkg.io import savefig from pkg.plot", "and $p^{(R)}$), and then # run a statistical test to see if these", "test). # ``` #%% [markdown] # {numref}`Figure {number} <fig:er_unmatched_test-er_density>` shows the comparison of", "#%% A1, A2, node_data = sample_toy_networks() node_data[\"labels\"] = np.ones(len(node_data), dtype=int) palette = {1:", "parameter $\\hat{p}$. The p-value for testing # the null hypothesis that these densities", "It is unclear whether this # difference in densities is biological (e.g. a", "the left # hemisphere), or something else entirely. Still, the ER test results", "We say that for all $(i, j), i \\neq j$, with $i$ and", "whether an observed difference for these other tests could be # explained by", "the adjacency matrix $A$ is then sampled independently according to a # [Bernoulli", "yields a significant # difference between brain hemispheres for this organism. It is", "each as an Erdos-Renyi network # and simply compare their estimated densities. #%%", "== 0: node_data = pd.DataFrame(index=np.arange(n)) ax = axs[0, i] networkplot_simple(A, node_data, ax=ax, compute_layout=i", "in density alone. #%% FIG_PATH = FIG_PATH / FILENAME fontsize = 12 methods", "number of triangles, etc), as well as many of the model-based # parameters", "SmartSVG(FIG_PATH / \"er_methods.svg\") methods.set_width(200) methods.move(10, 20) methods_panel = Panel( methods, Text(\"A) Density test", "we have strong # evidence to reject this version of our hypotheis of", "merge_axes(fig, axs, rows=1) soft_axis_off(ax) rainbowarrow(ax, (0.15, 0.5), (0.85, 0.5), cmap=\"Blues\", n=100, lw=12) ax.set_xlim((0,", "right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`. To determine # whether this is a difference likely", "figure=True) if not DISPLAY_FIGS: plt.close() def glue(name, var, **kwargs): default_glue(name, var, FILENAME, **kwargs)", "misc[\"observed1\"] n_edges_right = misc[\"observed2\"] #%% coverage = 0.95 glue(\"coverage\", coverage, form=\"2.0f%\") plot_density(misc, palette=network_palette,", "= 18 for i, p in enumerate(ps): A = er_np(n, p) if i", "explained by this difference in density alone. #%% FIG_PATH = FIG_PATH / FILENAME", "labelpad=10, ) stat, pvalue, misc = erdos_renyi_test(A1, A2) ax = axs[0, 1] ax.text(", "size=fontsize, weight=\"bold\") ) density = SmartSVG(FIG_PATH / \"er_density.svg\") density.set_height(methods.height) density.move(10, 15) density_panel =", "density.set_height(methods.height) density.move(10, 15) density_panel = Panel( density, Text(\"B) Density comparison\", 5, 10, size=fontsize,", "as np import pandas as pd import seaborn as sns from giskard.plot import", "the network densities between the left and right hemisphere induced subgraphs. We see", "all edges between any two nodes are equally likely. # # ```{admonition} Math", "the left is ~{glue:text}`er_unmatched_test-density_left:0.3f`, and # on the right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`. To", "run a statistical test to see if these densities are significantly different. #", "tests could be # explained by this difference in density alone. 
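
#%% [markdown]
# As a quick, self-contained sanity check on the binomial claim above (not part
# of the original analysis), we can simulate directed ER graphs with plain numpy
# and compare the mean and variance of the edge count to $Np$ and $Np(1-p)$ with
# $N = n(n-1)$; all parameters below are arbitrary.

#%%
import numpy as np

rng = np.random.default_rng(8888)
n, p, n_trials = 30, 0.2, 2000
n_possible = n * (n - 1)  # directed graph, no self-loops

edge_counts = np.empty(n_trials)
for t in range(n_trials):
    A = (rng.uniform(size=(n, n)) < p).astype(int)
    np.fill_diagonal(A, 0)  # remove self-loops
    edge_counts[t] = A.sum()

print(edge_counts.mean(), n_possible * p)           # ~174 vs. 174
print(edge_counts.var(), n_possible * p * (1 - p))  # ~139 vs. 139.2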
#%%
import datetime
import time

import matplotlib.path
import matplotlib.pyplot as plt
import matplotlib.transforms
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import merge_axes, soft_axis_off
from graspologic.simulations import er_np  # used below; missing from the original imports
from matplotlib.collections import LineCollection
from pkg.data import load_network_palette, load_node_palette, load_unmatched
from pkg.io import FIG_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.plot import SmartSVG, networkplot_simple, set_theme
from pkg.plot import draw_hypothesis_box, rainbowarrow
from pkg.plot.er import plot_density
from pkg.stats import erdos_renyi_test
from pkg.utils import sample_toy_networks
from svgutils.compose import Figure, Panel, Text

DISPLAY_FIGS = True
FILENAME = "er_unmatched_test"


def gluefig(name, fig, **kwargs):
    savefig(name, foldername=FILENAME, **kwargs)
    glue(name, fig, figure=True)
    if not DISPLAY_FIGS:
        plt.close()


def glue(name, var, **kwargs):
    default_glue(name, var, FILENAME, **kwargs)


t0 = time.time()
set_theme(font_scale=1.25)

network_palette, NETWORK_KEY = load_network_palette()
node_palette, NODE_KEY = load_node_palette()

left_adj, left_nodes = load_unmatched("left")
right_adj, right_nodes = load_unmatched("right")

#%%
# describe ER model
np.random.seed(8888)
ps = [0.2, 0.4, 0.6]
n_steps = len(ps)
fig, axs = plt.subplots(
    2,
    n_steps,
    figsize=(6, 3),
    gridspec_kw=dict(height_ratios=[2, 0.5]),
    constrained_layout=True,
)
n = 18
for i, p in enumerate(ps):
    A = er_np(n, p)
    if i == 0:
        node_data = pd.DataFrame(index=np.arange(n))
    ax = axs[0, i]
    networkplot_simple(A, node_data, ax=ax, compute_layout=i == 0)
    label_text = f"{p}"
    if i == 0:
        label_text = r"$p = $" + label_text
    ax.set_title(label_text, pad=10)
fig.set_facecolor("w")

ax = merge_axes(fig, axs, rows=1)
soft_axis_off(ax)
rainbowarrow(ax, (0.15, 0.5), (0.85, 0.5), cmap="Blues", n=100, lw=12)
ax.set_xlim((0, 1))
ax.set_ylim((0, 1))
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel("Increasing density")

gluefig("er_explain", fig)

#%%
A1, A2, node_data = sample_toy_networks()
node_data["labels"] = np.ones(len(node_data), dtype=int)
palette = {1: sns.color_palette("Set2")[2]}

fig, axs = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7))

ax = axs[0, 0]
networkplot_simple(A1, node_data, ax=ax)
ax.set_ylabel(
    "Left",
    color=network_palette["Left"],
    size="large",
    rotation=0,
    ha="right",
    labelpad=10,
)

ax = axs[1, 0]
networkplot_simple(A2, node_data, ax=ax)
ax.set_ylabel(
    "Right",
    color=network_palette["Right"],
    size="large",
    rotation=0,
    ha="right",
    labelpad=10,
)

stat, pvalue, misc = erdos_renyi_test(A1, A2)

ax = axs[0, 1]
ax.text(0.4, 0.2, r"$p = \frac{\# \ edges}{\# \ potential \ edges}$")
ax.axis("off")

x = 0
y = 0.55
draw_hypothesis_box("er", -0.2, 0.8, ax=ax, fontsize="medium", yskip=0.2)

gluefig("er_methods", fig)

#%%
stat, pvalue, misc = erdos_renyi_test(left_adj, right_adj)
glue("pvalue", pvalue, form="pvalue")

#%%
n_possible_left = misc["possible1"]
n_possible_right = misc["possible2"]
glue("n_possible_left", n_possible_left)
glue("n_possible_right", n_possible_right)

density_left = misc["probability1"]
density_right = misc["probability2"]
glue("density_left", density_left)
glue("density_right", density_right)

n_edges_left = misc["observed1"]
n_edges_right = misc["observed2"]

#%%
coverage = 0.95
glue("coverage", coverage, form="2.0f%")

plot_density(misc, palette=network_palette, coverage=coverage)
gluefig("er_density", fig)
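
#%% [markdown]
# `erdos_renyi_test` above comes from this project's own `pkg.stats`, whose
# internals are not shown here. For reference, the two-sided Fisher's exact test
# described in the math section can be reproduced with numpy and scipy alone.
# A minimal sketch, assuming dense adjacency arrays for directed networks with
# no self-loops (so $n(n-1)$ potential edges); the 2x2 table layout is our
# assumption, not necessarily pkg's.

#%%
from scipy.stats import fisher_exact


def density_fisher_test(adj_left, adj_right):
    """Two-sided Fisher's exact test of H0: p^(L) = p^(R)."""

    def counts(adj):
        n_nodes = adj.shape[0]
        possible = n_nodes * (n_nodes - 1)  # directed, no self-loops
        observed = int(np.count_nonzero(adj))  # nonzero entries count as edges
        return observed, possible

    m_left, possible_left = counts(adj_left)
    m_right, possible_right = counts(adj_right)
    table = [
        [m_left, possible_left - m_left],      # left: edges vs. non-edges
        [m_right, possible_right - m_right],   # right: edges vs. non-edges
    ]
    return fisher_exact(table, alternative="two-sided")


# The p-value should closely match the one from erdos_renyi_test above; the
# test statistic pkg reports may be defined differently.
print(density_fisher_test(left_adj, right_adj))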
#%%
import datetime
import time

import matplotlib.path
import matplotlib.pyplot as plt
import matplotlib.transforms
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import merge_axes, soft_axis_off
from graspologic.simulations import er_np
from matplotlib.collections import LineCollection
from pkg.data import load_network_palette, load_node_palette, load_unmatched
from pkg.io import FIG_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.plot import (
    SmartSVG,
    draw_hypothesis_box,
    networkplot_simple,
    rainbowarrow,
    set_theme,
)
from pkg.plot.er import plot_density
from pkg.stats import erdos_renyi_test
from pkg.utils import sample_toy_networks
from svgutils.compose import Figure, Panel, Text

DISPLAY_FIGS = True
FILENAME = "er_unmatched_test"


def gluefig(name, fig, **kwargs):
    savefig(name, foldername=FILENAME, **kwargs)
    glue(name, fig, figure=True)
    if not DISPLAY_FIGS:
        plt.close()


def glue(name, var, **kwargs):
    default_glue(name, var, FILENAME, **kwargs)
t0 = time.time()
set_theme(font_scale=1.25)

network_palette, NETWORK_KEY = load_network_palette()
node_palette, NODE_KEY = load_node_palette()

left_adj, left_nodes = load_unmatched("left")
right_adj, right_nodes = load_unmatched("right")
#%%
# describe ER model
np.random.seed(8888)
ps = [0.2, 0.4, 0.6]
n_steps = len(ps)
fig, axs = plt.subplots(
    2,
    n_steps,
    figsize=(6, 3),
    gridspec_kw=dict(height_ratios=[2, 0.5]),
    constrained_layout=True,
)
n = 18
for i, p in enumerate(ps):
    A = er_np(n, p)
    if i == 0:
        node_data = pd.DataFrame(index=np.arange(n))
    ax = axs[0, i]
    networkplot_simple(A, node_data, ax=ax, compute_layout=i == 0)
    label_text = f"{p}"
    if i == 0:
        label_text = r"$p = $" + label_text
    ax.set_title(label_text, pad=10)
fig.set_facecolor("w")

ax = merge_axes(fig, axs, rows=1)
soft_axis_off(ax)
rainbowarrow(ax, (0.15, 0.5), (0.85, 0.5), cmap="Blues", n=100, lw=12)
ax.set_xlim((0, 1))
ax.set_ylim((0, 1))
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel("Increasing density")

gluefig("er_explain", fig)
#%%
A1, A2, node_data = sample_toy_networks()
node_data["labels"] = np.ones(len(node_data), dtype=int)
palette = {1: sns.color_palette("Set2")[2]}

fig, axs = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7))

ax = axs[0, 0]
networkplot_simple(A1, node_data, ax=ax)
ax.set_title("Compute global\nconnection density")
ax.set_ylabel(
    "Left",
    color=network_palette["Left"],
    size="large",
    rotation=0,
    ha="right",
    labelpad=10,
)

ax = axs[1, 0]
networkplot_simple(A2, node_data, ax=ax)
ax.set_ylabel(
    "Right",
    color=network_palette["Right"],
    size="large",
    rotation=0,
    ha="right",
    labelpad=10,
)

stat, pvalue, misc = erdos_renyi_test(A1, A2)

ax = axs[0, 1]
ax.text(
    0.5,
    0.2,
    r"$p = \frac{\# \ edges}{\# \ potential \ edges}$",
    ha="center",
    va="center",
)
ax.axis("off")
ax.set_title("Compare ER\nmodels")
ax.set(xlim=(-0.5, 2), ylim=(0, 1))

ax = axs[1, 1]
ax.axis("off")
x = 0
y = 0.55
draw_hypothesis_box("er", -0.2, 0.8, ax=ax, fontsize="medium", yskip=0.2)

gluefig("er_methods", fig)
#%%
stat, pvalue, misc = erdos_renyi_test(left_adj, right_adj)
glue("pvalue", pvalue, form="pvalue")
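#%% [markdown]
# For intuition, the quantities stored in `misc` below can be recomputed directly from
# an adjacency matrix. A minimal sketch (the helper name `density` is hypothetical and
# assumes dense `numpy` adjacency matrices):

#%%
def density(adj):
    """Estimated connection probability: (# edges) / (# potential edges)."""
    n = adj.shape[0]
    # count nonzero entries as edges, excluding the diagonal (no self-loops)
    n_edges = np.count_nonzero(adj) - np.count_nonzero(np.diag(adj))
    return n_edges / (n * (n - 1))


print(density(left_adj), density(right_adj))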
#%%
n_possible_left = misc["possible1"]
n_possible_right = misc["possible2"]
glue("n_possible_left", n_possible_left)
glue("n_possible_right", n_possible_right)

density_left = misc["probability1"]
density_right = misc["probability2"]
glue("density_left", density_left, form="0.2g")
glue("density_right", density_right, form="0.2g")

n_edges_left = misc["observed1"]
n_edges_right = misc["observed2"]

#%%
coverage = 0.95
glue("coverage", coverage, form="2.0f%")
# plot_density is assumed here to return the figure it draws, so that the correct
# figure object is glued below (rather than the leftover figure from the methods panel)
fig, ax = plot_density(misc, palette=network_palette, coverage=coverage)
gluefig("er_density", fig)
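#%% [markdown]
# The error bars drawn by `plot_density` are binomial confidence intervals for each
# $\hat{p}$. A hand-rolled sketch using Clopper-Pearson (exact) intervals follows;
# whether `plot_density` uses exactly this construction is an assumption here, but any
# standard binomial interval is extremely tight at these sample sizes:

#%%
from scipy.stats import beta


def clopper_pearson(n_edges, n_possible, coverage=0.95):
    """Exact (Clopper-Pearson) confidence interval for an estimated density."""
    alpha = 1 - coverage
    lower = beta.ppf(alpha / 2, n_edges, n_possible - n_edges + 1)
    upper = beta.ppf(1 - alpha / 2, n_edges + 1, n_possible - n_edges)
    return lower, upper


print(clopper_pearson(n_edges_left, n_possible_left))
print(clopper_pearson(n_edges_right, n_possible_right))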
#%% [markdown]
# ## Reject bilateral symmetry under the ER model

#%% [markdown]
# ```{glue:figure} fig:er_unmatched_test-er_density
# :name: "fig:er_unmatched_test-er_density"
#
# Comparison of estimated densities for the left and right hemisphere networks. The
# estimated density (probability of any edge across the entire network), $\hat{p}$,
# for the left hemisphere is ~{glue:text}`er_unmatched_test-density_left:0.3f`, while
# for the right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`. Black lines
# denote {glue:text}`er_unmatched_test-coverage_percentage`**%**
# confidence intervals for this estimated parameter $\hat{p}$. The p-value for testing
# the null hypothesis that these densities are the same is
# {glue:text}`er_unmatched_test-pvalue:0.3g` (two-sided Fisher's exact test).
# ```
#%% [markdown]
# {numref}`Figure {number} <fig:er_unmatched_test-er_density>` shows the comparison of
# the network densities between the left and right hemisphere induced subgraphs. We
# see that the density on the left is
# ~{glue:text}`er_unmatched_test-density_left:0.3f`, and on the right it is
# ~{glue:text}`er_unmatched_test-density_right:0.3f`. To determine whether this is a
# difference likely to be observed by chance under the ER model, we ran a two-sided
# Fisher's exact test, which tests whether the success probabilities between two
# independent binomials are significantly different. This test yields a p-value of
# {glue:text}`er_unmatched_test-pvalue:0.3g`, suggesting that we have strong evidence
# to reject this version of our hypothesis of bilateral symmetry. While the difference
# between the estimated densities is not massive, the low p-value results from the
# large sample size for this comparison: there are
# {glue:text}`er_unmatched_test-n_possible_left:,.0f` and
# {glue:text}`er_unmatched_test-n_possible_right:,.0f` potential edges on the left and
# right, respectively.
#
# To our knowledge, when neuroscientists have considered the question of bilateral
# symmetry, they have not meant such a simple comparison of proportions. In many ways,
# the ER model is too simple to be an interesting description of connectome structure.
# However, we note that *even the simplest network model* yields a significant
# difference between brain hemispheres for this organism. It is unclear whether this
# difference in densities is biological (e.g. a result of slightly differing rates of
# development for this individual), an artifact of how the data was collected (e.g.
# technological limitations causing slightly lower reconstruction rates on the left
# hemisphere), or something else entirely. Still, the ER test results also provide
# important considerations for other tests. Almost any network statistic (e.g.
# clustering coefficient, number of triangles, etc.) is strongly related to the
# network density, as are many of the model-based parameters we will consider in this
# paper. Thus, if the densities are different, it is likely that tests based on any of
# these other statistics will also reject the null hypothesis. We will therefore need
# ways of telling whether an observed difference for these other tests could be
# explained by the difference in density alone.
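#%% [markdown]
# To make the last point concrete, a small simulation (a sketch reusing the `er_np`
# sampler imported above, with made-up sizes and densities) shows how strongly a
# generic statistic like the triangle count depends on density alone:

#%%
np.random.seed(8888)
for p in [0.10, 0.11]:  # a ~10% relative density difference
    A = er_np(100, p, directed=False)
    # triangles in an undirected simple graph: trace(A^3) / 6
    n_triangles = np.trace(A @ A @ A) / 6
    print(f"p = {p}: ~{n_triangles:.0f} triangles sampled")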
#%%
FIG_PATH = FIG_PATH / FILENAME

fontsize = 12

methods = SmartSVG(FIG_PATH / "er_methods.svg")
methods.set_width(200)
methods.move(10, 20)
methods_panel = Panel(
    methods, Text("A) Density test methods", 5, 10, size=fontsize, weight="bold")
)

density = SmartSVG(FIG_PATH / "er_density.svg")
density.set_height(methods.height)
density.move(10, 15)
density_panel = Panel(
    density, Text("B) Density comparison", 5, 10, size=fontsize, weight="bold")
)
density_panel.move(methods.width * 0.9, 0)

fig = Figure(
    (methods.width + density.width) * 0.9,
    (methods.height) * 0.9,
    methods_panel,
    density_panel,
)
fig.save(FIG_PATH / "composite.svg")
fig
# In our case, we will", "load_node_palette() left_adj, left_nodes = load_unmatched(\"left\") right_adj, right_nodes = load_unmatched(\"right\") #%% # describe ER", "densities between the left and right hemisphere induced subgraphs. We see # that", "np import pandas as pd import seaborn as sns from giskard.plot import merge_axes,", "version of our hypotheis of bilateral symmetry. We note that # while the", "under the ER model, # we ran a two-sided Fisher's exact test, which", "the simplest network model* yields a significant # difference between brain hemispheres for", "0) fig = Figure( (methods.width + density.width) * 0.9, (methods.height) * 0.9, methods_panel,", "of edges on the right, then we have: # $$m^{(L)} \\sim Binomial(n^{(L)}(n^{(L)} -", "\"er_unmatched_test\" def gluefig(name, fig, **kwargs): savefig(name, foldername=FILENAME, **kwargs) glue(name, fig, figure=True) if not", "model* yields a significant # difference between brain hemispheres for this organism. It", "# $$m^{(L)} \\sim Binomial(n^{(L)}(n^{(L)} - 1), p^{(L)})$$ # and independently, # $$m^{(R)} \\sim", "= FIG_PATH / FILENAME fontsize = 12 methods = SmartSVG(FIG_PATH / \"er_methods.svg\") methods.set_width(200)", "merge_axes, soft_axis_off from graspologic.simulations import er_np from matplotlib.collections import LineCollection from pkg.data import", "could be # explained by this difference in density alone. #%% FIG_PATH =", "referred to as the **network density**. #%% [markdown] # ## Testing under the", "from giskard.plot import merge_axes, soft_axis_off from graspologic.simulations import er_np from matplotlib.collections import LineCollection", "note that # while the difference between estimated densities is not massive, this", "# important considerations for other tests. Almost any network statistic (e.g. # clustering", "= np.ones(len(node_data), dtype=int) palette = {1: sns.color_palette(\"Set2\")[2]} fig, axs = plt.subplots(2, 2, figsize=(6,", "a difference likely to be observed by chance under the ER model, #", "{glue:text}`er_unmatched_test-pvalue:0.3g`, suggesting that we have strong # evidence to reject this version of", "probability, $p$. This is sometimes also referred to as the **network density**. #%%", "comparison. We note that there are # {glue:text}`er_unmatched_test-n_possible_left:,.0f` and # {glue:text}`er_unmatched_test-n_possible_right:,.0f` potential edges", "right_adj) glue(\"pvalue\", pvalue, form=\"pvalue\") #%% n_possible_left = misc[\"possible1\"] n_possible_right = misc[\"possible2\"] glue(\"n_possible_left\", n_possible_left)", "weight=\"bold\") ) density = SmartSVG(FIG_PATH / \"er_density.svg\") density.set_height(methods.height) density.move(10, 15) density_panel = Panel(", "significantly different. # ```{admonition} Math # Under this # model, the total number", "/ \"er_methods.svg\") methods.set_width(200) methods.move(10, 20) methods_panel = Panel( methods, Text(\"A) Density test methods\",", "# # ```{admonition} Math # Let $n$ be the number of nodes. We", "these other tests could be # explained by this difference in density alone.", "sns.color_palette(\"Set2\")[2]} fig, axs = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7)) ax = axs[0, 0]", "simple comparison of proportions. In many ways, # the ER model is too", "~{glue:text}`er_unmatched_test-density_left:0.3f`, while for the right # it is # ~{glue:text}`er_unmatched_test-density_right:0.3f`. 
#%%
import datetime
import time

import matplotlib.path
import matplotlib.pyplot as plt
import matplotlib.transforms
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import merge_axes, soft_axis_off
from graspologic.simulations import er_np
from matplotlib.collections import LineCollection
from pkg.data import load_network_palette, load_node_palette, load_unmatched
from pkg.io import FIG_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.plot import (
    SmartSVG,
    draw_hypothesis_box,
    networkplot_simple,
    rainbowarrow,
    set_theme,
)
from pkg.plot.er import plot_density
from pkg.stats import erdos_renyi_test
from pkg.utils import sample_toy_networks
from svgutils.compose import Figure, Panel, Text

DISPLAY_FIGS = True
FILENAME = "er_unmatched_test"


def gluefig(name, fig, **kwargs):
    savefig(name, foldername=FILENAME, **kwargs)
    glue(name, fig, figure=True)
    if not DISPLAY_FIGS:
        plt.close()


def glue(name, var, **kwargs):
    default_glue(name, var, FILENAME, **kwargs)


t0 = time.time()
set_theme(font_scale=1.25)

network_palette, NETWORK_KEY = load_network_palette()
node_palette, NODE_KEY = load_node_palette()

left_adj, left_nodes = load_unmatched("left")
right_adj, right_nodes = load_unmatched("right")

#%%
# describe ER model
np.random.seed(8888)
ps = [0.2, 0.4, 0.6]
n_steps = len(ps)
fig, axs = plt.subplots(
    2,
    n_steps,
    figsize=(6, 3),
    gridspec_kw=dict(height_ratios=[2, 0.5]),
    constrained_layout=True,
)
n = 18
for i, p in enumerate(ps):
    A = er_np(n, p)
    if i == 0:
        node_data = pd.DataFrame(index=np.arange(n))
    ax = axs[0, i]
    networkplot_simple(A, node_data, ax=ax, compute_layout=i == 0)
    label_text = f"{p}"
    if i == 0:
        label_text = r"$p = $" + label_text
    ax.set_title(label_text, pad=10)
fig.set_facecolor("w")

ax = merge_axes(fig, axs, rows=1)
soft_axis_off(ax)
rainbowarrow(ax, (0.15, 0.5), (0.85, 0.5), cmap="Blues", n=100, lw=12)
ax.set_xlim((0, 1))
ax.set_ylim((0, 1))
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel("Increasing density")

gluefig("er_explain", fig)

#%%
A1, A2, node_data = sample_toy_networks()
node_data["labels"] = np.ones(len(node_data), dtype=int)
palette = {1: sns.color_palette("Set2")[2]}

fig, axs = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7))

ax = axs[0, 0]
networkplot_simple(A1, node_data, ax=ax)
ax.set_title("Compute global\nconnection density")
ax.set_ylabel(
    "Left",
    color=network_palette["Left"],
    size="large",
    rotation=0,
    ha="right",
    labelpad=10,
)

ax = axs[1, 0]
networkplot_simple(A2, node_data, ax=ax)
ax.set_ylabel(
    "Right",
    color=network_palette["Right"],
    size="large",
    rotation=0,
    ha="right",
    labelpad=10,
)

stat, pvalue, misc = erdos_renyi_test(A1, A2)

ax = axs[0, 1]
ax.text(
    0.4,
    0.2,
    r"$p = \frac{\# \ edges}{\# \ potential \ edges}$",
    ha="center",
    va="center",
)
ax.axis("off")
ax.set_title("Compare ER\nmodels")
ax.set(xlim=(-0.5, 2), ylim=(0, 1))

ax = axs[1, 1]
ax.axis("off")

x = 0
y = 0.55
draw_hypothesis_box("er", -0.2, 0.8, ax=ax, fontsize="medium", yskip=0.2)

gluefig("er_methods", fig)

#%%
stat, pvalue, misc = erdos_renyi_test(left_adj, right_adj)
glue("pvalue", pvalue, form="pvalue")

#%%
n_possible_left = misc["possible1"]
n_possible_right = misc["possible2"]
glue("n_possible_left", n_possible_left)
glue("n_possible_right", n_possible_right)

density_left = misc["probability1"]
density_right = misc["probability2"]
glue("density_left", density_left, form="0.2g")
glue("density_right", density_right, form="0.2g")

n_edges_left = misc["observed1"]
n_edges_right = misc["observed2"]

#%%
coverage = 0.95
glue("coverage", coverage, form="2.0f%")
plot_density(misc, palette=network_palette, coverage=coverage)
gluefig("er_density", fig)

#%% [markdown]
# ## Reject bilateral symmetry under the ER model

#%% [markdown]
# ```{glue:figure} fig:er_unmatched_test-er_density
# :name: "fig:er_unmatched_test-er_density"
#
# Comparison of estimated densities for the left and right hemisphere networks. The
# estimated density (probability of any edge across the entire network), $\hat{p}$, for
# the left hemisphere is ~{glue:text}`er_unmatched_test-density_left:0.3f`, while for
# the right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`. Black lines denote
# {glue:text}`er_unmatched_test-coverage_percentage`**%**
# confidence intervals for this estimated parameter $\hat{p}$. The p-value for testing
# the null hypothesis that these densities are the same is
# {glue:text}`er_unmatched_test-pvalue:0.3g` (two-sided Fisher's exact test).
# ```
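#%% [markdown]
# The confidence intervals in the figure are intervals for a binomial proportion;
# the exact procedure lives inside `plot_density` and is not reproduced here. As a
# sketch under that assumption, a standard Clopper-Pearson interval on
# (# edges) out of (# potential edges) can be computed with statsmodels; the counts
# below are hypothetical.

#%%
from statsmodels.stats.proportion import proportion_confint

# Hypothetical counts for one hemisphere
n_edges, n_possible = 50_000, 2_248_500

coverage = 0.95
lower, upper = proportion_confint(
    n_edges, n_possible, alpha=1 - coverage, method="beta"  # Clopper-Pearson
)
print(f"p_hat = {n_edges / n_possible:.4f}")
print(f"{coverage:.0%} CI: ({lower:.4f}, {upper:.4f})")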
#%% [markdown]
# {numref}`Figure {number} <fig:er_unmatched_test-er_density>` shows the comparison of
# the network densities between the left and right hemisphere induced subgraphs. We see
# that the density on the left is ~{glue:text}`er_unmatched_test-density_left:0.3f`, and
# on the right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`. To determine
# whether this difference is likely to be observed by chance under the ER model, we ran
# a two-sided Fisher's exact test, which tests whether the success probabilities of two
# independent binomials are significantly different. This test yields a p-value of
# {glue:text}`er_unmatched_test-pvalue:0.3g`, suggesting that we have strong evidence to
# reject this version of our hypothesis of bilateral symmetry. While the difference
# between the estimated densities is not large, the low p-value results from the large
# sample size for this comparison: there are
# {glue:text}`er_unmatched_test-n_possible_left:,.0f` and
# {glue:text}`er_unmatched_test-n_possible_right:,.0f` potential edges on the left and
# right, respectively.
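#%% [markdown]
# To see how the sample size drives the small p-value, consider a rough two-proportion
# comparison with arbitrary numbers on this scale (a sketch, not the actual values): a
# z-test stands in for Fisher's exact test here, since the two agree closely with
# samples this large.

#%%
import numpy as np
from statsmodels.stats.proportion import proportions_ztest

# Arbitrary illustrative numbers: two very similar densities, millions of
# potential edges per hemisphere
n_possible = np.array([5_000_000, 5_000_000])
densities = np.array([0.0160, 0.0163])
edge_counts = (densities * n_possible).astype(int)

# Even a ~2% relative difference in density is many standard errors from zero
stat, pvalue = proportions_ztest(edge_counts, n_possible)
print(f"z = {stat:.1f}, p-value = {pvalue:.2g}")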
#%% [markdown]
# To our knowledge, when neuroscientists have considered the question of bilateral
# symmetry, they have not meant such a simple comparison of proportions. In many ways,
# the ER model is too simple to be an interesting description of connectome structure.
# However, we note that *even the simplest network model* yields a significant
# difference between brain hemispheres for this organism. It is unclear whether this
# difference in densities is biological (e.g., a result of slightly differing rates of
# development for this individual), an artifact of how the data was collected (e.g.,
# technological limitations causing slightly lower reconstruction rates on the left
# hemisphere), or something else entirely. Still, the ER test results also provide
# important considerations for other tests. Almost any network statistic (e.g., the
# clustering coefficient or the number of triangles), as well as many of the
# model-based parameters we will consider in this paper, is strongly related to the
# network density. Thus, if the densities are different, it is likely that tests based
# on any of these other statistics will also reject the null hypothesis, and we will
# need ways of telling whether an observed difference for these other tests could be
# explained by this difference in density alone.

#%%
FIG_PATH = FIG_PATH / FILENAME

fontsize = 12

methods = SmartSVG(FIG_PATH / "er_methods.svg")
methods.set_width(200)
methods.move(10, 20)
methods_panel = Panel(
    methods, Text("A) Density test methods", 5, 10, size=fontsize, weight="bold")
)

density = SmartSVG(FIG_PATH / "er_density.svg")
density.set_height(methods.height)
density.move(10, 15)
density_panel = Panel(
    density, Text("B) Density comparison", 5, 10, size=fontsize, weight="bold")
)
density_panel.move(methods.width * 0.9, 0)

fig = Figure(
    (methods.width + density.width) * 0.9,
    (methods.height) * 0.9,
    methods_panel,
    density_panel,
)
fig.save(FIG_PATH / "composite.svg")
fig

#%%
elapsed = time.time() - t0
delta = datetime.timedelta(seconds=elapsed)
print(f"Script took {delta}")
print(f"Completed at {datetime.datetime.now()}")
In # other words, all", "Density test # Here, we compare the two unmatched networks by treating each", "ways of telling whether an observed difference for these other tests could be", "edges}{\\# \\ potential \\ edges}$\", ha=\"center\", va=\"center\", ) ax.axis(\"off\") ax.set_title(\"Compare ER\\nmodels\") ax.set(xlim=(-0.5, 2),", "[**Erdos-Renyi (ER) model** # ](https://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93R%C3%A9nyi_model) # is one of the simplest network models.", "are strongly related to the network # density. Thus, if the densities are", "densities ($p^{(L)}$ and $p^{(R)}$), and then # run a statistical test to see", "FILENAME, **kwargs) t0 = time.time() set_theme(font_scale=1.25) network_palette, NETWORK_KEY = load_network_palette() node_palette, NODE_KEY =", "also provide # important considerations for other tests. Almost any network statistic (e.g.", "right_adj, right_nodes = load_unmatched(\"right\") #%% # describe ER model np.random.seed(8888) ps = [0.2,", "tests based on any # of these other test statistics will also reject", "$\\hat{p}$, for # the left # hemisphere is ~{glue:text}`er_unmatched_test-density_left:0.3f`, while for the right", "and $m^{(R)}$ is the number of edges on the right, then we have:", "different. # ```{admonition} Math # Under this # model, the total number of", "import SmartSVG, networkplot_simple, set_theme from pkg.plot.er import plot_density from pkg.stats import erdos_renyi_test from", "shows the comparison of # the network densities between the left and right", "networks $A^{(L)}$ and $A^{(R)}$ under this model, we # simply need to compute", "fig.set_facecolor(\"w\") ax = merge_axes(fig, axs, rows=1) soft_axis_off(ax) rainbowarrow(ax, (0.15, 0.5), (0.85, 0.5), cmap=\"Blues\",", "methods.set_width(200) methods.move(10, 20) methods_panel = Panel( methods, Text(\"A) Density test methods\", 5, 10,", "difference in density alone. #%% FIG_PATH = FIG_PATH / FILENAME fontsize = 12", "sided Fisher's exact test). # ``` #%% [markdown] # {numref}`Figure {number} <fig:er_unmatched_test-er_density>` shows", "the success probabilities # between two independent binomials are significantly different. This test", "$m^{(L)}$ is the number of # edges on the left # hemisphere, and", "other tests could be # explained by this difference in density alone. #%%", "Math # Under this # model, the total number of edges $m$ comes", "soft_axis_off from graspologic.simulations import er_np from matplotlib.collections import LineCollection from pkg.data import load_network_palette,", "# sided Fisher's exact test). # ``` #%% [markdown] # {numref}`Figure {number} <fig:er_unmatched_test-er_density>`", "words, all edges between any two nodes are equally likely. # # ```{admonition}", "# symmetry, they have not meant such a simple comparison of proportions. In", "# alternative hypotheses above. # ``` #%% import datetime import time import matplotlib.path", "r\"$p = \\frac{\\# \\ edges}{\\# \\ potential \\ edges}$\", ha=\"center\", va=\"center\", ) ax.axis(\"off\")", "interest is the global connection # probability, $p$. 
This is sometimes also referred", "the difference between estimated densities is not massive, this low p-value # results", "the number of # edges on the left # hemisphere, and $m^{(R)}$ is", "different, it is likely that tests based on any # of these other", "any edge across the entire network), $\\hat{p}$, for # the left # hemisphere", "coverage = 0.95 glue(\"coverage\", coverage, form=\"2.0f%\") plot_density(misc, palette=network_palette, coverage=coverage) gluefig(\"er_density\", fig) #%% [markdown]", "The Erdos-Renyi (ER) model # The [**Erdos-Renyi (ER) model** # ](https://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93R%C3%A9nyi_model) # is", "erdos_renyi_test(A1, A2) ax = axs[0, 1] ax.text( 0.4, 0.2, r\"$p = \\frac{\\# \\", "problem of testing for equal proportions is well studied. # In our case,", "ways, # the ER model is too simple to be an interesting description", "If $m^{(L)}$ is the number of # edges on the left # hemisphere,", "from pkg.plot import draw_hypothesis_box, rainbowarrow DISPLAY_FIGS = True FILENAME = \"er_unmatched_test\" def gluefig(name,", "/ \"composite.svg\") fig #%% elapsed = time.time() - t0 delta = datetime.timedelta(seconds=elapsed) print(f\"Script", "# parameters we will consider in this paper, are strongly related to the", "their estimated densities. #%% [markdown] # ## The Erdos-Renyi (ER) model # The", "model # In order to compare two networks $A^{(L)}$ and $A^{(R)}$ under this", "from a $Binomial(n(n-1), p)$ distribution, # where $n$ is the number of nodes.", "ax.set_xlabel(\"Increasing density\") gluefig(\"er_explain\", fig) #%% A1, A2, node_data = sample_toy_networks() node_data[\"labels\"] = np.ones(len(node_data),", "This is sometimes also referred to as the **network density**. #%% [markdown] #", "the global connection probability. # Each element of the adjacency matrix $A$ is", "Black lines denote # {glue:text}`er_unmatched_test-coverage_percentage`**%** # confidence intervals for this estimated parameter $\\hat{p}$.", "# of these other test statistics will also reject the null hypothesis. Thus,", "and right hemisphere networks. The # estimated density (probability of any edge across", "0.4, 0.2, r\"$p = \\frac{\\# \\ edges}{\\# \\ potential \\ edges}$\", ha=\"center\", va=\"center\",", "= SmartSVG(FIG_PATH / \"er_density.svg\") density.set_height(methods.height) density.move(10, 15) density_panel = Panel( density, Text(\"B) Density", "ax = axs[0, 0] networkplot_simple(A1, node_data, ax=ax) ax.set_title(\"Compute global\\nconnection density\") ax.set_ylabel( \"Left\", color=network_palette[\"Left\"],", "elapsed = time.time() - t0 delta = datetime.timedelta(seconds=elapsed) print(f\"Script took {delta}\") print(f\"Completed at", "density, Text(\"B) Density comparison\", 5, 10, size=fontsize, weight=\"bold\") ) density_panel.move(methods.width * 0.9, 0)", "**kwargs): savefig(name, foldername=FILENAME, **kwargs) glue(name, fig, figure=True) if not DISPLAY_FIGS: plt.close() def glue(name,", "# estimated density (probability of any edge across the entire network), $\\hat{p}$, for", "ax.set_title(\"Compute global\\nconnection density\") ax.set_ylabel( \"Left\", color=network_palette[\"Left\"], size=\"large\", rotation=0, ha=\"right\", labelpad=10, ) ax =", "fig, axs = plt.subplots( 2, n_steps, figsize=(6, 3), gridspec_kw=dict(height_ratios=[2, 0.5]), constrained_layout=True, ) n", "limitations causing slightly lower reconstruction rates on the left # hemisphere), or something", "they have not meant such a simple comparison of proportions. 
In many ways,", "the number of nodes. We say that for all $(i, j), i \\neq", "pd import seaborn as sns from giskard.plot import merge_axes, soft_axis_off from graspologic.simulations import", "of any edge across the entire network), $\\hat{p}$, for # the left #", "we ran a two-sided Fisher's exact test, which tests whether the success probabilities", "pkg.data import load_network_palette, load_node_palette, load_unmatched from pkg.io import FIG_PATH from pkg.io import glue", "respectively, making the sample size for this comparison quite large. # # To", "between brain hemispheres for this organism. It is unclear whether this # difference", "if i == 0: node_data = pd.DataFrame(index=np.arange(n)) ax = axs[0, i] networkplot_simple(A, node_data,", "density\") gluefig(\"er_explain\", fig) #%% A1, A2, node_data = sample_toy_networks() node_data[\"labels\"] = np.ones(len(node_data), dtype=int)", "this paper, are strongly related to the network # density. Thus, if the", "from pkg.io import FIG_PATH from pkg.io import glue as default_glue from pkg.io import", "that these densities are the same is # {glue:text}`er_unmatched_test-pvalue:0.3g` (two # sided Fisher's", "Text(\"B) Density comparison\", 5, 10, size=fontsize, weight=\"bold\") ) density_panel.move(methods.width * 0.9, 0) fig", "sample size for this comparison quite large. # # To our knowledge, when", "while for the right # it is # ~{glue:text}`er_unmatched_test-density_right:0.3f`. Black lines denote #", "stat, pvalue, misc = erdos_renyi_test(A1, A2) ax = axs[0, 1] ax.text( 0.4, 0.2,", "soft_axis_off(ax) rainbowarrow(ax, (0.15, 0.5), (0.85, 0.5), cmap=\"Blues\", n=100, lw=12) ax.set_xlim((0, 1)) ax.set_ylim((0, 1))", "1] ax.text( 0.4, 0.2, r\"$p = \\frac{\\# \\ edges}{\\# \\ potential \\ edges}$\",", "6), gridspec_kw=dict(wspace=0.7)) ax = axs[0, 0] networkplot_simple(A1, node_data, ax=ax) ax.set_title(\"Compute global\\nconnection density\") ax.set_ylabel(", "rates on the left # hemisphere), or something else entirely. Still, the ER", "ax = axs[0, 1] ax.text( 0.4, 0.2, r\"$p = \\frac{\\# \\ edges}{\\# \\", "is ~{glue:text}`er_unmatched_test-density_left:0.3f`, and # on the right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`. To determine #", "our knowledge, when neuroscientists have considered the question of bilateral # symmetry, they", "\\frac{\\# \\ edges}{\\# \\ potential \\ edges}$\", ha=\"center\", va=\"center\", ) ax.axis(\"off\") ax.set_title(\"Compare ER\\nmodels\")", "the same is # {glue:text}`er_unmatched_test-pvalue:0.3g` (two # sided Fisher's exact test). # ```", "= \\frac{\\# \\ edges}{\\# \\ potential \\ edges}$\", ha=\"center\", va=\"center\", ) ax.axis(\"off\") ax.set_title(\"Compare", "note that *even the simplest network model* yields a significant # difference between", "of {glue:text}`er_unmatched_test-pvalue:0.3g`, suggesting that we have strong # evidence to reject this version", "# difference in densities is biological (e.g. a result of slightly differing rates", "palette = {1: sns.color_palette(\"Set2\")[2]} fig, axs = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7)) ax", "test for the null and # alternative hypotheses above. # ``` #%% import", "right_nodes = load_unmatched(\"right\") #%% # describe ER model np.random.seed(8888) ps = [0.2, 0.4,", "this comparison quite large. 
# # To our knowledge, when neuroscientists have considered", "import Figure, Panel, Text from pkg.plot import draw_hypothesis_box, rainbowarrow DISPLAY_FIGS = True FILENAME", "**kwargs) glue(name, fig, figure=True) if not DISPLAY_FIGS: plt.close() def glue(name, var, **kwargs): default_glue(name,", "structure. # However, we note that *even the simplest network model* yields a", "load_node_palette, load_unmatched from pkg.io import FIG_PATH from pkg.io import glue as default_glue from", "18 for i, p in enumerate(ps): A = er_np(n, p) if i ==", "constrained_layout=True, ) n = 18 for i, p in enumerate(ps): A = er_np(n,", "be an interesting description of connectome structure. # However, we note that *even", "\\ edges}{\\# \\ potential \\ edges}$\", ha=\"center\", va=\"center\", ) ax.axis(\"off\") ax.set_title(\"Compare ER\\nmodels\") ax.set(xlim=(-0.5,", "right # it is # ~{glue:text}`er_unmatched_test-density_right:0.3f`. Black lines denote # {glue:text}`er_unmatched_test-coverage_percentage`**%** # confidence", "[markdown] # ## Reject bilateral symmetry under the ER model #%% [markdown] #", "well studied. # In our case, we will use Fisher's Exact test to", "as pd import seaborn as sns from giskard.plot import merge_axes, soft_axis_off from graspologic.simulations", "from svgutils.compose import Figure, Panel, Text from pkg.plot import draw_hypothesis_box, rainbowarrow DISPLAY_FIGS =", "triangles, etc), as well as many of the model-based # parameters we will", "simplest network models. This model treats # the probability of each potential edge", "test yields a # p-value of {glue:text}`er_unmatched_test-pvalue:0.3g`, suggesting that we have strong #", "{glue:text}`er_unmatched_test-coverage_percentage`**%** # confidence intervals for this estimated parameter $\\hat{p}$. The p-value for testing", "= load_network_palette() node_palette, NODE_KEY = load_node_palette() left_adj, left_nodes = load_unmatched(\"left\") right_adj, right_nodes =", "is the global connection # probability, $p$. This is sometimes also referred to", "if these densities are significantly different. # ```{admonition} Math # Under this #", "networks. The # estimated density (probability of any edge across the entire network),", "r\"$p = $\" + label_text ax.set_title(label_text, pad=10) fig.set_facecolor(\"w\") ax = merge_axes(fig, axs, rows=1)", "in a comparison of $p^{(L)}$ vs. # $p^{(R)}$. 
Formally, we are testing: #", "1] ax.axis(\"off\") x = 0 y = 0.55 draw_hypothesis_box(\"er\", -0.2, 0.8, ax=ax, fontsize=\"medium\",", "glue(\"density_right\", density_right, form=\"0.2g\") n_edges_left = misc[\"observed1\"] n_edges_right = misc[\"observed2\"] #%% coverage = 0.95", "= merge_axes(fig, axs, rows=1) soft_axis_off(ax) rainbowarrow(ax, (0.15, 0.5), (0.85, 0.5), cmap=\"Blues\", n=100, lw=12)", "methods_panel, density_panel, ) fig.save(FIG_PATH / \"composite.svg\") fig #%% elapsed = time.time() - t0", "number of # edges on the left # hemisphere, and $m^{(R)}$ is the", "this low p-value # results from the large sample size for this comparison.", "To determine # whether this is a difference likely to be observed by", "across the entire network), $\\hat{p}$, for # the left # hemisphere is ~{glue:text}`er_unmatched_test-density_left:0.3f`,", "coverage=coverage) gluefig(\"er_density\", fig) #%% [markdown] # ## Reject bilateral symmetry under the ER", "methods, Text(\"A) Density test methods\", 5, 10, size=fontsize, weight=\"bold\") ) density = SmartSVG(FIG_PATH", "\\ edges}$\", ha=\"center\", va=\"center\", ) ax.axis(\"off\") ax.set_title(\"Compare ER\\nmodels\") ax.set(xlim=(-0.5, 2), ylim=(0, 1)) ax", "axs[0, 1] ax.text( 0.4, 0.2, r\"$p = \\frac{\\# \\ edges}{\\# \\ potential \\", "else entirely. Still, the ER test results also provide # important considerations for", "between two independent binomials are significantly different. This test yields a # p-value", "= misc[\"observed2\"] #%% coverage = 0.95 glue(\"coverage\", coverage, form=\"2.0f%\") plot_density(misc, palette=network_palette, coverage=coverage) gluefig(\"er_density\",", "p) $$ # ``` # Thus, for this model, the only parameter of", "and independently, # $$m^{(R)} \\sim Binomial(n^{(R)}(n^{(R)} - 1), p^{(R)})$$ # To compare the", "we will use Fisher's Exact test to run this test for the null", "symmetry. We note that # while the difference between estimated densities is not", "$$ A \\sim ER(n, p) $$ # ``` # Thus, for this model,", "of estimated densities for the left and right hemisphere networks. The # estimated", ") density_panel.move(methods.width * 0.9, 0) fig = Figure( (methods.width + density.width) * 0.9,", "of bilateral # symmetry, they have not meant such a simple comparison of", "Erdos-Renyi network # and simply compare their estimated densities. #%% [markdown] # ##", "fig, figure=True) if not DISPLAY_FIGS: plt.close() def glue(name, var, **kwargs): default_glue(name, var, FILENAME,", "low p-value # results from the large sample size for this comparison. We", "not DISPLAY_FIGS: plt.close() def glue(name, var, **kwargs): default_glue(name, var, FILENAME, **kwargs) t0 =", "have considered the question of bilateral # symmetry, they have not meant such", "size=fontsize, weight=\"bold\") ) density_panel.move(methods.width * 0.9, 0) fig = Figure( (methods.width + density.width)", "this organism. It is unclear whether this # difference in densities is biological", "ax = merge_axes(fig, axs, rows=1) soft_axis_off(ax) rainbowarrow(ax, (0.15, 0.5), (0.85, 0.5), cmap=\"Blues\", n=100,", "density**. 
#%% [markdown] # ## Testing under the ER model # In order", "Density test methods\", 5, 10, size=fontsize, weight=\"bold\") ) density = SmartSVG(FIG_PATH / \"er_density.svg\")", "ax.text( 0.4, 0.2, r\"$p = \\frac{\\# \\ edges}{\\# \\ potential \\ edges}$\", ha=\"center\",", "ax.set_ylim((0, 1)) ax.set_xticks([]) ax.set_yticks([]) ax.set_xlabel(\"Increasing density\") gluefig(\"er_explain\", fig) #%% A1, A2, node_data =", "0 y = 0.55 draw_hypothesis_box(\"er\", -0.2, 0.8, ax=ax, fontsize=\"medium\", yskip=0.2) gluefig(\"er_methods\", fig) #%%", "two networks, we are just interested in a comparison of $p^{(L)}$ vs. #", "node_data, ax=ax) ax.set_title(\"Compute global\\nconnection density\") ax.set_ylabel( \"Left\", color=network_palette[\"Left\"], size=\"large\", rotation=0, ha=\"right\", labelpad=10, )", "(methods.height) * 0.9, methods_panel, density_panel, ) fig.save(FIG_PATH / \"composite.svg\") fig #%% elapsed =", "a result of slightly differing rates of # development for this individual), an", "hemisphere), or something else entirely. Still, the ER test results also provide #", "p_{ij} = p $$ # Where $p$ is the the global connection probability.", "# ## The Erdos-Renyi (ER) model # The [**Erdos-Renyi (ER) model** # ](https://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93R%C3%A9nyi_model)", "we will # need ways of telling whether an observed difference for these", "n_possible_right) density_left = misc[\"probability1\"] density_right = misc[\"probability2\"] glue(\"density_left\", density_left, form=\"0.2g\") glue(\"density_right\", density_right, form=\"0.2g\")", "number of edges $m$ comes from a $Binomial(n(n-1), p)$ distribution, # where $n$", "ha=\"center\", va=\"center\", ) ax.axis(\"off\") ax.set_title(\"Compare ER\\nmodels\") ax.set(xlim=(-0.5, 2), ylim=(0, 1)) ax = axs[1,", "and then # run a statistical test to see if these densities are", "Thus, for this model, the only parameter of interest is the global connection", "0.5]), constrained_layout=True, ) n = 18 for i, p in enumerate(ps): A =", "be the number of nodes. We say that for all $(i, j), i", "and # right, # respectively, making the sample size for this comparison quite", "# :name: \"fig:er_unmatched_test-er_density\" # # Comparison of estimated densities for the left and", "p^{(R)}$$ # Fortunately, the problem of testing for equal proportions is well studied.", "left_adj, left_nodes = load_unmatched(\"left\") right_adj, right_nodes = load_unmatched(\"right\") #%% # describe ER model", "edges}$\", ha=\"center\", va=\"center\", ) ax.axis(\"off\") ax.set_title(\"Compare ER\\nmodels\") ax.set(xlim=(-0.5, 2), ylim=(0, 1)) ax =", "numpy as np import pandas as pd import seaborn as sns from giskard.plot", "= [0.2, 0.4, 0.6] n_steps = len(ps) fig, axs = plt.subplots( 2, n_steps,", "$p$ is the the global connection probability. 
# Each element of the adjacency", "edges on the left # hemisphere, and $m^{(R)}$ is the number of edges", "palette=network_palette, coverage=coverage) gluefig(\"er_density\", fig) #%% [markdown] # ## Reject bilateral symmetry under the", "1), p^{(R)})$$ # To compare the two networks, we are just interested in", "misc[\"possible1\"] n_possible_right = misc[\"possible2\"] glue(\"n_possible_left\", n_possible_left) glue(\"n_possible_right\", n_possible_right) density_left = misc[\"probability1\"] density_right =", "#%% [markdown] # ## Reject bilateral symmetry under the ER model #%% [markdown]", "= len(ps) fig, axs = plt.subplots( 2, n_steps, figsize=(6, 3), gridspec_kw=dict(height_ratios=[2, 0.5]), constrained_layout=True,", "pvalue, misc = erdos_renyi_test(A1, A2) ax = axs[0, 1] ax.text( 0.4, 0.2, r\"$p", "trials with the same probability. If $m^{(L)}$ is the number of # edges", "under the ER model # In order to compare two networks $A^{(L)}$ and", "We note that there are # {glue:text}`er_unmatched_test-n_possible_left:,.0f` and # {glue:text}`er_unmatched_test-n_possible_right:,.0f` potential edges on", "In order to compare two networks $A^{(L)}$ and $A^{(R)}$ under this model, we", "i == 0: node_data = pd.DataFrame(index=np.arange(n)) ax = axs[0, i] networkplot_simple(A, node_data, ax=ax,", "A2, node_data = sample_toy_networks() node_data[\"labels\"] = np.ones(len(node_data), dtype=int) palette = {1: sns.color_palette(\"Set2\")[2]} fig,", "fontsize = 12 methods = SmartSVG(FIG_PATH / \"er_methods.svg\") methods.set_width(200) methods.move(10, 20) methods_panel =", "densities is not massive, this low p-value # results from the large sample", "alternative hypotheses above. # ``` #%% import datetime import time import matplotlib.path import", "other tests. Almost any network statistic (e.g. # clustering coefficient, number of triangles,", "entire network), $\\hat{p}$, for # the left # hemisphere is ~{glue:text}`er_unmatched_test-density_left:0.3f`, while for", "two unmatched networks by treating each as an Erdos-Renyi network # and simply", "connectome structure. # However, we note that *even the simplest network model* yields", "import draw_hypothesis_box, rainbowarrow DISPLAY_FIGS = True FILENAME = \"er_unmatched_test\" def gluefig(name, fig, **kwargs):", "i] networkplot_simple(A, node_data, ax=ax, compute_layout=i == 0) label_text = f\"{p}\" if i ==", "= True FILENAME = \"er_unmatched_test\" def gluefig(name, fig, **kwargs): savefig(name, foldername=FILENAME, **kwargs) glue(name,", "form=\"pvalue\") #%% n_possible_left = misc[\"possible1\"] n_possible_right = misc[\"possible2\"] glue(\"n_possible_left\", n_possible_left) glue(\"n_possible_right\", n_possible_right) density_left", "of connectome structure. # However, we note that *even the simplest network model*", "\\neq p^{(R)}$$ # Fortunately, the problem of testing for equal proportions is well", "unmatched networks by treating each as an Erdos-Renyi network # and simply compare", "will also reject the null hypothesis. Thus, we will # need ways of", "many ways, # the ER model is too simple to be an interesting", "left is ~{glue:text}`er_unmatched_test-density_left:0.3f`, and # on the right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`. To determine", "estimated density (probability of any edge across the entire network), $\\hat{p}$, for #", "5, 10, size=fontsize, weight=\"bold\") ) density = SmartSVG(FIG_PATH / \"er_density.svg\") density.set_height(methods.height) density.move(10, 15)", "size for this comparison. 
We note that there are # {glue:text}`er_unmatched_test-n_possible_left:,.0f` and #", "The p-value for testing # the null hypothesis that these densities are the", "the ER model is too simple to be an interesting description of connectome", "{number} <fig:er_unmatched_test-er_density>` shows the comparison of # the network densities between the left", "edges on the right, then we have: # $$m^{(L)} \\sim Binomial(n^{(L)}(n^{(L)} - 1),", "$A^{(L)}$ and $A^{(R)}$ under this model, we # simply need to compute these", "the left and right hemisphere induced subgraphs. We see # that the density", "observed by chance under the ER model, # we ran a two-sided Fisher's", "statistics will also reject the null hypothesis. Thus, we will # need ways", "coefficient, number of triangles, etc), as well as many of the model-based #", "for the left and right hemisphere networks. The # estimated density (probability of", "say that for all $(i, j), i \\neq j$, with $i$ and #", "and # on the right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`. To determine # whether this", "have: # $$m^{(L)} \\sim Binomial(n^{(L)}(n^{(L)} - 1), p^{(L)})$$ # and independently, # $$m^{(R)}", "n_steps, figsize=(6, 3), gridspec_kw=dict(height_ratios=[2, 0.5]), constrained_layout=True, ) n = 18 for i, p", "pkg.stats import erdos_renyi_test from pkg.utils import sample_toy_networks from svgutils.compose import Figure, Panel, Text", "FILENAME = \"er_unmatched_test\" def gluefig(name, fig, **kwargs): savefig(name, foldername=FILENAME, **kwargs) glue(name, fig, figure=True)", "edges between any two nodes are equally likely. # # ```{admonition} Math #", "0.95 glue(\"coverage\", coverage, form=\"2.0f%\") plot_density(misc, palette=network_palette, coverage=coverage) gluefig(\"er_density\", fig) #%% [markdown] # ##", "slightly lower reconstruction rates on the left # hemisphere), or something else entirely.", "import glue as default_glue from pkg.io import savefig from pkg.plot import SmartSVG, networkplot_simple,", "\"fig:er_unmatched_test-er_density\" # # Comparison of estimated densities for the left and right hemisphere", "20) methods_panel = Panel( methods, Text(\"A) Density test methods\", 5, 10, size=fontsize, weight=\"bold\")", "# explained by this difference in density alone. #%% FIG_PATH = FIG_PATH /", "is the number of # edges on the left # hemisphere, and $m^{(R)}$", "test to run this test for the null and # alternative hypotheses above.", "ER test results also provide # important considerations for other tests. Almost any", "is ~{glue:text}`er_unmatched_test-density_left:0.3f`, while for the right # it is # ~{glue:text}`er_unmatched_test-density_right:0.3f`. Black lines", "see if these densities are significantly different. # ```{admonition} Math # Under this", "densities are different, it is likely that tests based on any # of", "with the same probability. If $m^{(L)}$ is the number of # edges on", "ER model, # we ran a two-sided Fisher's exact test, which tests whether", "va=\"center\", ) ax.axis(\"off\") ax.set_title(\"Compare ER\\nmodels\") ax.set(xlim=(-0.5, 2), ylim=(0, 1)) ax = axs[1, 1]", "#%% [markdown] # ```{glue:figure} fig:er_unmatched_test-er_density # :name: \"fig:er_unmatched_test-er_density\" # # Comparison of estimated", "Erdos-Renyi (ER) model # The [**Erdos-Renyi (ER) model** # ](https://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93R%C3%A9nyi_model) # is one", "symmetry, they have not meant such a simple comparison of proportions. 
In many", "draw_hypothesis_box(\"er\", -0.2, 0.8, ax=ax, fontsize=\"medium\", yskip=0.2) gluefig(\"er_methods\", fig) #%% stat, pvalue, misc =", "stat, pvalue, misc = erdos_renyi_test(left_adj, right_adj) glue(\"pvalue\", pvalue, form=\"pvalue\") #%% n_possible_left = misc[\"possible1\"]", "we compare the two unmatched networks by treating each as an Erdos-Renyi network", "erdos_renyi_test(left_adj, right_adj) glue(\"pvalue\", pvalue, form=\"pvalue\") #%% n_possible_left = misc[\"possible1\"] n_possible_right = misc[\"possible2\"] glue(\"n_possible_left\",", "node_data = pd.DataFrame(index=np.arange(n)) ax = axs[0, i] networkplot_simple(A, node_data, ax=ax, compute_layout=i == 0)", "difference between brain hemispheres for this organism. It is unclear whether this #", "import load_network_palette, load_node_palette, load_unmatched from pkg.io import FIG_PATH from pkg.io import glue as", "rainbowarrow(ax, (0.15, 0.5), (0.85, 0.5), cmap=\"Blues\", n=100, lw=12) ax.set_xlim((0, 1)) ax.set_ylim((0, 1)) ax.set_xticks([])", "such a simple comparison of proportions. In many ways, # the ER model", "draw_hypothesis_box, rainbowarrow DISPLAY_FIGS = True FILENAME = \"er_unmatched_test\" def gluefig(name, fig, **kwargs): savefig(name,", "```{admonition} Math # Under this # model, the total number of edges $m$", "chance under the ER model, # we ran a two-sided Fisher's exact test,", "interested in a comparison of $p^{(L)}$ vs. # $p^{(R)}$. Formally, we are testing:", "likely that tests based on any # of these other test statistics will", "15) density_panel = Panel( density, Text(\"B) Density comparison\", 5, 10, size=fontsize, weight=\"bold\") )", "= $\" + label_text ax.set_title(label_text, pad=10) fig.set_facecolor(\"w\") ax = merge_axes(fig, axs, rows=1) soft_axis_off(ax)", "in enumerate(ps): A = er_np(n, p) if i == 0: node_data = pd.DataFrame(index=np.arange(n))", "= pd.DataFrame(index=np.arange(n)) ax = axs[0, i] networkplot_simple(A, node_data, ax=ax, compute_layout=i == 0) label_text", "model np.random.seed(8888) ps = [0.2, 0.4, 0.6] n_steps = len(ps) fig, axs =", "other test statistics will also reject the null hypothesis. Thus, we will #", "Comparison of estimated densities for the left and right hemisphere networks. The #", "estimated densities. #%% [markdown] # ## The Erdos-Renyi (ER) model # The [**Erdos-Renyi", "the simplest network models. This model treats # the probability of each potential", "difference for these other tests could be # explained by this difference in", "#%% n_possible_left = misc[\"possible1\"] n_possible_right = misc[\"possible2\"] glue(\"n_possible_left\", n_possible_left) glue(\"n_possible_right\", n_possible_right) density_left =", "fig.save(FIG_PATH / \"composite.svg\") fig #%% elapsed = time.time() - t0 delta = datetime.timedelta(seconds=elapsed)", "Let $n$ be the number of nodes. We say that for all $(i,", "``` #%% import datetime import time import matplotlib.path import matplotlib.pyplot as plt import", "rotation=0, ha=\"right\", labelpad=10, ) ax = axs[1, 0] networkplot_simple(A2, node_data, ax=ax) ax.set_ylabel( \"Right\",", "methods.move(10, 20) methods_panel = Panel( methods, Text(\"A) Density test methods\", 5, 10, size=fontsize,", "large sample size for this comparison. We note that there are # {glue:text}`er_unmatched_test-n_possible_left:,.0f`", "too simple to be an interesting description of connectome structure. 
# However, we", "[0.2, 0.4, 0.6] n_steps = len(ps) fig, axs = plt.subplots( 2, n_steps, figsize=(6,", "j$, with $i$ and # $j$ both running # from $1 ... n$,", "sample_toy_networks from svgutils.compose import Figure, Panel, Text from pkg.plot import draw_hypothesis_box, rainbowarrow DISPLAY_FIGS", "results also provide # important considerations for other tests. Almost any network statistic", "The # estimated density (probability of any edge across the entire network), $\\hat{p}$,", "organism. It is unclear whether this # difference in densities is biological (e.g.", "sample size for this comparison. We note that there are # {glue:text}`er_unmatched_test-n_possible_left:,.0f` and", "two networks $A^{(L)}$ and $A^{(R)}$ under this model, we # simply need to", "#%% [markdown] # ## Testing under the ER model # In order to", "comparison of # the network densities between the left and right hemisphere induced", "as well as many of the model-based # parameters we will consider in", "methods_panel = Panel( methods, Text(\"A) Density test methods\", 5, 10, size=fontsize, weight=\"bold\") )", "as described above, we say it is distributed # $$ A \\sim ER(n,", "= plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7)) ax = axs[0, 0] networkplot_simple(A1, node_data, ax=ax)", "between any two nodes are equally likely. # # ```{admonition} Math # Let", "considerations for other tests. Almost any network statistic (e.g. # clustering coefficient, number", "# In order to compare two networks $A^{(L)}$ and $A^{(R)}$ under this model,", "the sum of # independent Bernoulli trials with the same probability. If $m^{(L)}$", "is # {glue:text}`er_unmatched_test-pvalue:0.3g` (two # sided Fisher's exact test). # ``` #%% [markdown]", "form=\"2.0f%\") plot_density(misc, palette=network_palette, coverage=coverage) gluefig(\"er_density\", fig) #%% [markdown] # ## Reject bilateral symmetry", "$i$ and # $j$ both running # from $1 ... n$, the probability", "evidence to reject this version of our hypotheis of bilateral symmetry. We note", "0.6] n_steps = len(ps) fig, axs = plt.subplots( 2, n_steps, figsize=(6, 3), gridspec_kw=dict(height_ratios=[2,", "axs[1, 1] ax.axis(\"off\") x = 0 y = 0.55 draw_hypothesis_box(\"er\", -0.2, 0.8, ax=ax,", "significant # difference between brain hemispheres for this organism. It is unclear whether", "brain hemispheres for this organism. It is unclear whether this # difference in", "are # {glue:text}`er_unmatched_test-n_possible_left:,.0f` and # {glue:text}`er_unmatched_test-n_possible_right:,.0f` potential edges on the left and #", "= load_node_palette() left_adj, left_nodes = load_unmatched(\"left\") right_adj, right_nodes = load_unmatched(\"right\") #%% # describe", "ax.set_ylabel( \"Right\", color=network_palette[\"Right\"], size=\"large\", rotation=0, ha=\"right\", labelpad=10, ) stat, pvalue, misc = erdos_renyi_test(A1,", "# model, the total number of edges $m$ comes from a $Binomial(n(n-1), p)$", "# while the difference between estimated densities is not massive, this low p-value", "see # that the density on the left is ~{glue:text}`er_unmatched_test-density_left:0.3f`, and # on", "left_nodes = load_unmatched(\"left\") right_adj, right_nodes = load_unmatched(\"right\") #%% # describe ER model np.random.seed(8888)", "This is because the number of edges is the sum of # independent", "just interested in a comparison of $p^{(L)}$ vs. # $p^{(R)}$. 
Formally, we are", "between estimated densities is not massive, this low p-value # results from the", "* 0.9, 0) fig = Figure( (methods.width + density.width) * 0.9, (methods.height) *", "$$ # Where $p$ is the the global connection probability. # Each element", "by this difference in density alone. #%% FIG_PATH = FIG_PATH / FILENAME fontsize", "for all $(i, j), i \\neq j$, with $i$ and # $j$ both", "f\"{p}\" if i == 0: label_text = r\"$p = $\" + label_text ax.set_title(label_text,", "likely to be observed by chance under the ER model, # we ran", "then we have: # $$m^{(L)} \\sim Binomial(n^{(L)}(n^{(L)} - 1), p^{(L)})$$ # and independently,", "Density comparison\", 5, 10, size=fontsize, weight=\"bold\") ) density_panel.move(methods.width * 0.9, 0) fig =", "the network # density. Thus, if the densities are different, it is likely", "ER model is too simple to be an interesting description of connectome structure.", "plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7)) ax = axs[0, 0] networkplot_simple(A1, node_data, ax=ax) ax.set_title(\"Compute", "glue as default_glue from pkg.io import savefig from pkg.plot import SmartSVG, networkplot_simple, set_theme", "# Each element of the adjacency matrix $A$ is then sampled independently according", "(e.g. a result of slightly differing rates of # development for this individual),", "= axs[0, 1] ax.text( 0.4, 0.2, r\"$p = \\frac{\\# \\ edges}{\\# \\ potential", "import FIG_PATH from pkg.io import glue as default_glue from pkg.io import savefig from", "= Panel( methods, Text(\"A) Density test methods\", 5, 10, size=fontsize, weight=\"bold\") ) density", "any # of these other test statistics will also reject the null hypothesis.", "the same probability. If $m^{(L)}$ is the number of # edges on the", "is a difference likely to be observed by chance under the ER model,", "# whether this is a difference likely to be observed by chance under", "any network statistic (e.g. # clustering coefficient, number of triangles, etc), as well", "between the left and right hemisphere induced subgraphs. We see # that the", "this version of our hypotheis of bilateral symmetry. We note that # while", "model, we # simply need to compute these network densities ($p^{(L)}$ and $p^{(R)}$),", "etc), as well as many of the model-based # parameters we will consider", "from pkg.plot import SmartSVG, networkplot_simple, set_theme from pkg.plot.er import plot_density from pkg.stats import", "In our case, we will use Fisher's Exact test to run this test", "from $1 ... n$, the probability of the edge $(i, j)$ occuring is:", "$$H_0: p^{(L)} = p^{(R)}, \\quad H_a: p^{(L)} \\neq p^{(R)}$$ # Fortunately, the problem", "the only parameter of interest is the global connection # probability, $p$. This", "# from $1 ... n$, the probability of the edge $(i, j)$ occuring", "plt import matplotlib.transforms import numpy as np import pandas as pd import seaborn", "# # To our knowledge, when neuroscientists have considered the question of bilateral", "# ``` #%% [markdown] # {numref}`Figure {number} <fig:er_unmatched_test-er_density>` shows the comparison of #", "def glue(name, var, **kwargs): default_glue(name, var, FILENAME, **kwargs) t0 = time.time() set_theme(font_scale=1.25) network_palette,", "$$ P[A_{ij} = 1] = p_{ij} = p $$ # Where $p$ is", "lower reconstruction rates on the left # hemisphere), or something else entirely. 
Still,", "when neuroscientists have considered the question of bilateral # symmetry, they have not", "density = SmartSVG(FIG_PATH / \"er_density.svg\") density.set_height(methods.height) density.move(10, 15) density_panel = Panel( density, Text(\"B)", "plt.close() def glue(name, var, **kwargs): default_glue(name, var, FILENAME, **kwargs) t0 = time.time() set_theme(font_scale=1.25)", "# confidence intervals for this estimated parameter $\\hat{p}$. The p-value for testing #", "node_data, ax=ax) ax.set_ylabel( \"Right\", color=network_palette[\"Right\"], size=\"large\", rotation=0, ha=\"right\", labelpad=10, ) stat, pvalue, misc", "density\") ax.set_ylabel( \"Left\", color=network_palette[\"Left\"], size=\"large\", rotation=0, ha=\"right\", labelpad=10, ) ax = axs[1, 0]", "[markdown] # ## The Erdos-Renyi (ER) model # The [**Erdos-Renyi (ER) model** #", "$\\hat{p}$. The p-value for testing # the null hypothesis that these densities are", "{glue:text}`er_unmatched_test-n_possible_left:,.0f` and # {glue:text}`er_unmatched_test-n_possible_right:,.0f` potential edges on the left and # right, #", "a significant # difference between brain hemispheres for this organism. It is unclear", "same probability. If $m^{(L)}$ is the number of # edges on the left", "to run this test for the null and # alternative hypotheses above. #", "ax = axs[1, 1] ax.axis(\"off\") x = 0 y = 0.55 draw_hypothesis_box(\"er\", -0.2,", "$(i, j), i \\neq j$, with $i$ and # $j$ both running #", "**kwargs) t0 = time.time() set_theme(font_scale=1.25) network_palette, NETWORK_KEY = load_network_palette() node_palette, NODE_KEY = load_node_palette()", "hypothesis that these densities are the same is # {glue:text}`er_unmatched_test-pvalue:0.3g` (two # sided", "it is distributed # $$ A \\sim ER(n, p) $$ # ``` #", "ax = axs[0, i] networkplot_simple(A, node_data, ax=ax, compute_layout=i == 0) label_text = f\"{p}\"", "misc[\"probability1\"] density_right = misc[\"probability2\"] glue(\"density_left\", density_left, form=\"0.2g\") glue(\"density_right\", density_right, form=\"0.2g\") n_edges_left = misc[\"observed1\"]", "= erdos_renyi_test(A1, A2) ax = axs[0, 1] ax.text( 0.4, 0.2, r\"$p = \\frac{\\#", "axs = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7)) ax = axs[0, 0] networkplot_simple(A1, node_data,", "# ```{glue:figure} fig:er_unmatched_test-er_density # :name: \"fig:er_unmatched_test-er_density\" # # Comparison of estimated densities for", "two-sided Fisher's exact test, which tests whether the success probabilities # between two", "hemisphere is ~{glue:text}`er_unmatched_test-density_left:0.3f`, while for the right # it is # ~{glue:text}`er_unmatched_test-density_right:0.3f`. Black", ":name: \"fig:er_unmatched_test-er_density\" # # Comparison of estimated densities for the left and right", "SmartSVG(FIG_PATH / \"er_density.svg\") density.set_height(methods.height) density.move(10, 15) density_panel = Panel( density, Text(\"B) Density comparison\",", "to reject this version of our hypotheis of bilateral symmetry. We note that", "be the same. 
In # other words, all edges between any two nodes", "form=\"0.2g\") n_edges_left = misc[\"observed1\"] n_edges_right = misc[\"observed2\"] #%% coverage = 0.95 glue(\"coverage\", coverage,", "graspologic.simulations import er_np from matplotlib.collections import LineCollection from pkg.data import load_network_palette, load_node_palette, load_unmatched", "sns from giskard.plot import merge_axes, soft_axis_off from graspologic.simulations import er_np from matplotlib.collections import", "run this test for the null and # alternative hypotheses above. # ```", "hypotheis of bilateral symmetry. We note that # while the difference between estimated", "# results from the large sample size for this comparison. We note that", "the edge $(i, j)$ occuring is: # $$ P[A_{ij} = 1] = p_{ij}", "{numref}`Figure {number} <fig:er_unmatched_test-er_density>` shows the comparison of # the network densities between the", "Panel( methods, Text(\"A) Density test methods\", 5, 10, size=fontsize, weight=\"bold\") ) density =", "hemispheres for this organism. It is unclear whether this # difference in densities", "estimated parameter $\\hat{p}$. The p-value for testing # the null hypothesis that these", "i, p in enumerate(ps): A = er_np(n, p) if i == 0: node_data", "total number of edges $m$ comes from a $Binomial(n(n-1), p)$ distribution, # where", "ax.axis(\"off\") ax.set_title(\"Compare ER\\nmodels\") ax.set(xlim=(-0.5, 2), ylim=(0, 1)) ax = axs[1, 1] ax.axis(\"off\") x", "the left and right hemisphere networks. The # estimated density (probability of any", "10, size=fontsize, weight=\"bold\") ) density = SmartSVG(FIG_PATH / \"er_density.svg\") density.set_height(methods.height) density.move(10, 15) density_panel", "import numpy as np import pandas as pd import seaborn as sns from", "color=network_palette[\"Right\"], size=\"large\", rotation=0, ha=\"right\", labelpad=10, ) stat, pvalue, misc = erdos_renyi_test(A1, A2) ax", "[markdown] # {numref}`Figure {number} <fig:er_unmatched_test-er_density>` shows the comparison of # the network densities", "= axs[0, i] networkplot_simple(A, node_data, ax=ax, compute_layout=i == 0) label_text = f\"{p}\" if", "p-value for testing # the null hypothesis that these densities are the same", "enumerate(ps): A = er_np(n, p) if i == 0: node_data = pd.DataFrame(index=np.arange(n)) ax", "alone. #%% FIG_PATH = FIG_PATH / FILENAME fontsize = 12 methods = SmartSVG(FIG_PATH", "rotation=0, ha=\"right\", labelpad=10, ) stat, pvalue, misc = erdos_renyi_test(A1, A2) ax = axs[0,", "an observed difference for these other tests could be # explained by this", "* 0.9, methods_panel, density_panel, ) fig.save(FIG_PATH / \"composite.svg\") fig #%% elapsed = time.time()", "we # simply need to compute these network densities ($p^{(L)}$ and $p^{(R)}$), and", "confidence intervals for this estimated parameter $\\hat{p}$. The p-value for testing # the", "set_theme from pkg.plot.er import plot_density from pkg.stats import erdos_renyi_test from pkg.utils import sample_toy_networks", "= 1] = p_{ij} = p $$ # Where $p$ is the the", "len(ps) fig, axs = plt.subplots( 2, n_steps, figsize=(6, 3), gridspec_kw=dict(height_ratios=[2, 0.5]), constrained_layout=True, )", "our hypotheis of bilateral symmetry. We note that # while the difference between", "<fig:er_unmatched_test-er_density>` shows the comparison of # the network densities between the left and", "significantly different. 
This test yields a # p-value of {glue:text}`er_unmatched_test-pvalue:0.3g`, suggesting that we", "distribution, # where $n$ is the number of nodes. This is because the", "a two-sided Fisher's exact test, which tests whether the success probabilities # between", "figsize=(6, 6), gridspec_kw=dict(wspace=0.7)) ax = axs[0, 0] networkplot_simple(A1, node_data, ax=ax) ax.set_title(\"Compute global\\nconnection density\")", "null hypothesis. Thus, we will # need ways of telling whether an observed", "testing # the null hypothesis that these densities are the same is #", "lines denote # {glue:text}`er_unmatched_test-coverage_percentage`**%** # confidence intervals for this estimated parameter $\\hat{p}$. The", "matrix $A$ is then sampled independently according to a # [Bernoulli distribution](https://en.wikipedia.org/wiki/Bernoulli_distribution): #", "compute these network densities ($p^{(L)}$ and $p^{(R)}$), and then # run a statistical", "(ER) model # The [**Erdos-Renyi (ER) model** # ](https://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93R%C3%A9nyi_model) # is one of", "of nodes. We say that for all $(i, j), i \\neq j$, with", "comparison of proportions. In many ways, # the ER model is too simple", "simply need to compute these network densities ($p^{(L)}$ and $p^{(R)}$), and then #", "set_theme(font_scale=1.25) network_palette, NETWORK_KEY = load_network_palette() node_palette, NODE_KEY = load_node_palette() left_adj, left_nodes = load_unmatched(\"left\")", "is unclear whether this # difference in densities is biological (e.g. a result", ") stat, pvalue, misc = erdos_renyi_test(A1, A2) ax = axs[0, 1] ax.text( 0.4,", "# {glue:text}`er_unmatched_test-coverage_percentage`**%** # confidence intervals for this estimated parameter $\\hat{p}$. The p-value for", "order to compare two networks $A^{(L)}$ and $A^{(R)}$ under this model, we #", "= misc[\"probability2\"] glue(\"density_left\", density_left, form=\"0.2g\") glue(\"density_right\", density_right, form=\"0.2g\") n_edges_left = misc[\"observed1\"] n_edges_right =", "from the large sample size for this comparison. We note that there are", "case, we will use Fisher's Exact test to run this test for the", "sample_toy_networks() node_data[\"labels\"] = np.ones(len(node_data), dtype=int) palette = {1: sns.color_palette(\"Set2\")[2]} fig, axs = plt.subplots(2,", "edge $(i, j)$ occuring is: # $$ P[A_{ij} = 1] = p_{ij} =", "= p^{(R)}, \\quad H_a: p^{(L)} \\neq p^{(R)}$$ # Fortunately, the problem of testing", "running # from $1 ... n$, the probability of the edge $(i, j)$", "test statistics will also reject the null hypothesis. Thus, we will # need", "the number of edges is the sum of # independent Bernoulli trials with", "# that the density on the left is ~{glue:text}`er_unmatched_test-density_left:0.3f`, and # on the", "we say it is distributed # $$ A \\sim ER(n, p) $$ #", "not meant such a simple comparison of proportions. In many ways, # the", "= time.time() - t0 delta = datetime.timedelta(seconds=elapsed) print(f\"Script took {delta}\") print(f\"Completed at {datetime.datetime.now()}\")", "0.9, (methods.height) * 0.9, methods_panel, density_panel, ) fig.save(FIG_PATH / \"composite.svg\") fig #%% elapsed", "network occuring to be the same. 
In # other words, all edges between", "{1: sns.color_palette(\"Set2\")[2]} fig, axs = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7)) ax = axs[0,", "pkg.plot import draw_hypothesis_box, rainbowarrow DISPLAY_FIGS = True FILENAME = \"er_unmatched_test\" def gluefig(name, fig,", "0] networkplot_simple(A2, node_data, ax=ax) ax.set_ylabel( \"Right\", color=network_palette[\"Right\"], size=\"large\", rotation=0, ha=\"right\", labelpad=10, ) stat,", "= f\"{p}\" if i == 0: label_text = r\"$p = $\" + label_text", "in densities is biological (e.g. a result of slightly differing rates of #", "3), gridspec_kw=dict(height_ratios=[2, 0.5]), constrained_layout=True, ) n = 18 for i, p in enumerate(ps):", "i \\neq j$, with $i$ and # $j$ both running # from $1", "density_panel.move(methods.width * 0.9, 0) fig = Figure( (methods.width + density.width) * 0.9, (methods.height)", "whether this # difference in densities is biological (e.g. a result of slightly", "ax = axs[1, 0] networkplot_simple(A2, node_data, ax=ax) ax.set_ylabel( \"Right\", color=network_palette[\"Right\"], size=\"large\", rotation=0, ha=\"right\",", "network # density. Thus, if the densities are different, it is likely that", "for i, p in enumerate(ps): A = er_np(n, p) if i == 0:", "p-value of {glue:text}`er_unmatched_test-pvalue:0.3g`, suggesting that we have strong # evidence to reject this", "of edges $m$ comes from a $Binomial(n(n-1), p)$ distribution, # where $n$ is", "edge in the network occuring to be the same. In # other words,", "two nodes are equally likely. # # ```{admonition} Math # Let $n$ be", "# Density test # Here, we compare the two unmatched networks by treating", "dtype=int) palette = {1: sns.color_palette(\"Set2\")[2]} fig, axs = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7))", "how the data was collected (e.g. # technological limitations causing slightly lower reconstruction", "ran a two-sided Fisher's exact test, which tests whether the success probabilities #", "for testing # the null hypothesis that these densities are the same is", "to a # [Bernoulli distribution](https://en.wikipedia.org/wiki/Bernoulli_distribution): # $$ A_{ij} \\sim Bernoulli(p) $$ # For", "methods = SmartSVG(FIG_PATH / \"er_methods.svg\") methods.set_width(200) methods.move(10, 20) methods_panel = Panel( methods, Text(\"A)", "differing rates of # development for this individual), an artifact of how the", "Fortunately, the problem of testing for equal proportions is well studied. # In", "test results also provide # important considerations for other tests. Almost any network", "LineCollection from pkg.data import load_network_palette, load_node_palette, load_unmatched from pkg.io import FIG_PATH from pkg.io", "of $p^{(L)}$ vs. # $p^{(R)}$. Formally, we are testing: # $$H_0: p^{(L)} =", "Reject bilateral symmetry under the ER model #%% [markdown] # ```{glue:figure} fig:er_unmatched_test-er_density #", "Text from pkg.plot import draw_hypothesis_box, rainbowarrow DISPLAY_FIGS = True FILENAME = \"er_unmatched_test\" def", "# ## Reject bilateral symmetry under the ER model #%% [markdown] # ```{glue:figure}", "p $$ # Where $p$ is the the global connection probability. 
#%%
import datetime
import time

import matplotlib.path
import matplotlib.pyplot as plt
import matplotlib.transforms
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import merge_axes, soft_axis_off
from graspologic.simulations import er_np
from matplotlib.collections import LineCollection
from pkg.data import load_network_palette, load_node_palette, load_unmatched
from pkg.io import FIG_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.plot import SmartSVG, networkplot_simple, set_theme
from pkg.plot import draw_hypothesis_box, rainbowarrow
from pkg.stats import erdos_renyi_test
from pkg.utils import sample_toy_networks
from svgutils.compose import Figure, Panel, Text

DISPLAY_FIGS = True
FILENAME = "er_unmatched_test"


def gluefig(name, fig, **kwargs):
    savefig(name, foldername=FILENAME, **kwargs)
    glue(name, fig, figure=True)
    if not DISPLAY_FIGS:
        plt.close()


def glue(name, var, **kwargs):
    default_glue(name, var, FILENAME, **kwargs)


t0 = time.time()
set_theme(font_scale=1.25)

network_palette, NETWORK_KEY = load_network_palette()
node_palette, NODE_KEY = load_node_palette()

left_adj, left_nodes = load_unmatched("left")
right_adj, right_nodes = load_unmatched("right")

#%%
A1, A2, node_data = sample_toy_networks()
node_data["labels"] = np.ones(len(node_data), dtype=int)
palette = {1: sns.color_palette("Set2")[2]}

fig, axs = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7))

ax = axs[0, 0]
networkplot_simple(A1, node_data, ax=ax)
ax.set_title("Compute global\nconnection density")
ax.set_ylabel(
    "Left",
    color=network_palette["Left"],
    size="large",
    rotation=0,
    ha="right",
    labelpad=10,
)

ax = axs[1, 0]
networkplot_simple(A2, node_data, ax=ax)
ax.set_ylabel(
    "Right",
    color=network_palette["Right"],
    size="large",
    rotation=0,
    ha="right",
    labelpad=10,
)

stat, pvalue, misc = erdos_renyi_test(A1, A2)

ax = axs[0, 1]
ax.text(
    0.4,
    0.2,
    r"$p = \frac{\# \ edges}{\# \ potential \ edges}$",
    ha="center",
    va="center",
)
ax.axis("off")
ax.set_title("Compare ER\nmodels")
ax.set(xlim=(-0.5, 2), ylim=(0, 1))

ax = axs[1, 1]
ax.axis("off")
draw_hypothesis_box("er", -0.2, 0.8, ax=ax, fontsize="medium", yskip=0.2)

gluefig("er_methods", fig)

#%%
fig, axs = plt.subplots(
    2,
    3,
    figsize=(8, 3),
    gridspec_kw=dict(height_ratios=[2, 0.5]),
    constrained_layout=True,
)

n = 18
ps = [0.1, 0.3, 0.5]  # a low, medium, and high density for illustration
for i, p in enumerate(ps):
    A = er_np(n, p)
    if i == 0:
        node_data = pd.DataFrame(index=np.arange(n))
        node_data["labels"] = np.ones(n, dtype=int)
    ax = axs[0, i]
    networkplot_simple(A, node_data, ax=ax, compute_layout=i == 0)
    label_text = f"{p}"
    if i == 0:
        label_text = r"$p = $" + label_text
    ax.set_title(label_text)

for ax in axs[1, :]:
    ax.axis("off")

#%%
stat, pvalue, misc = erdos_renyi_test(left_adj, right_adj)
glue("pvalue", pvalue, form="pvalue")

#%%
n_possible_left = misc["possible1"]
n_possible_right = misc["possible2"]
glue("n_possible_left", n_possible_left)
glue("n_possible_right", n_possible_right)

density_left = misc["probability1"]
density_right = misc["probability2"]
glue("density_left", density_left, form="0.2g")
glue("density_right", density_right, form="0.2g")

n_edges_left = misc["observed1"]
n_edges_right = misc["observed2"]

#%%
coverage = 0.95
glue("coverage_percentage", coverage * 100)
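#%% [markdown]
# The confidence intervals drawn in the density figure can be approximated with a
# standard normal (Wald) interval for a binomial proportion. The cell below is an
# illustrative sketch added here, not the paper's own figure code; the helper name
# `proportion_ci` is hypothetical:

#%%
from scipy.stats import norm


def proportion_ci(p_hat, n_possible, coverage=0.95):
    # Wald interval: p_hat +/- z * sqrt(p_hat * (1 - p_hat) / n)
    z = norm.ppf(1 - (1 - coverage) / 2)
    half_width = z * np.sqrt(p_hat * (1 - p_hat) / n_possible)
    return p_hat - half_width, p_hat + half_width


print(proportion_ci(density_left, n_possible_left, coverage=coverage))
print(proportion_ci(density_right, n_possible_right, coverage=coverage))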
#%% [markdown]
# ## Reject bilateral symmetry under the ER model

#%% [markdown]
# ```{glue:figure} fig:er_unmatched_test-er_density
# :name: "fig:er_unmatched_test-er_density"
#
# Comparison of estimated densities for the left and right hemisphere networks. The
# estimated density (probability of any edge across the entire network), $\hat{p}$,
# for the left hemisphere is ~{glue:text}`er_unmatched_test-density_left:0.3f`,
# while for the right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`.
# Black lines denote {glue:text}`er_unmatched_test-coverage_percentage`**%**
# confidence intervals for this estimated parameter $\hat{p}$. The p-value for
# testing the null hypothesis that these densities are the same is
# {glue:text}`er_unmatched_test-pvalue:0.3g` (two-sided Fisher's exact test).
# ```

#%% [markdown]
# {numref}`Figure {number} <fig:er_unmatched_test-er_density>` shows the comparison
# of the network densities between the left and right hemisphere induced subgraphs.
# We see that the density on the left is
# ~{glue:text}`er_unmatched_test-density_left:0.3f`, and on the right it is
# ~{glue:text}`er_unmatched_test-density_right:0.3f`. To test whether this is a
# difference likely to be observed by chance under the ER model, we ran a two-sided
# Fisher's exact test, which tests whether the success probabilities between two
# independent binomials are significantly different. This test yields a p-value of
# {glue:text}`er_unmatched_test-pvalue:0.3g`, suggesting that we have strong
# evidence to reject this version of our hypothesis of bilateral symmetry. While
# the difference between estimated densities is not massive, this low p-value
# results from the large sample size for this comparison: there are
# ~{glue:text}`er_unmatched_test-n_possible_left` and
# ~{glue:text}`er_unmatched_test-n_possible_right` potential edges on the left and
# right, respectively, making the sample size for this comparison quite large.
#
# To our knowledge, when neuroscientists have considered the question of bilateral
# symmetry, they have not meant such a simple comparison of proportions; in many
# ways, the ER model is too simple to capture what is interesting about these
# networks. Further, it is unclear whether this difference in densities is
# biological (e.g. a result of slightly differing rates of development for this
# individual), an artifact of how the data was collected (e.g. technological
# limitations causing slightly lower reconstruction rates on the left hemisphere),
# or something else entirely. Still, the ER test results also provide important
# considerations for other tests. Almost any network statistic (e.g. clustering
# coefficient, number of triangles, etc.), as we will see later in the paper, is
# strongly related to the network density. Thus, if the densities are different,
# it is likely that tests based on any of these other test statistics will also
# reject the null hypothesis, so we will need ways of telling whether an observed
# difference for these other tests could be simply a result of a difference in
# density.

#%%
fontsize = 12

methods = SmartSVG(FIG_PATH / "er_methods.svg")
methods.set_width(200)
methods.move(10, 20)
methods_panel = Panel(methods, Text("A)", 5, 10, size=fontsize, weight="bold"))

density = SmartSVG(FIG_PATH / "er_density.svg")
density.set_width(200)
density.move(10, 20)
density_panel = Panel(density, Text("B)", 5, 10, size=fontsize, weight="bold"))
density_panel.move(methods.width * 0.9, 0)

fig = Figure(
    (methods.width + density.width) * 0.9,
    (methods.height) * 0.9,
    methods_panel,
    density_panel,
)
fig.save(FIG_PATH / "composite.svg")
fig

#%%
elapsed = time.time() - t0
delta = datetime.timedelta(seconds=elapsed)
print(f"Script took {delta}")
print(f"Completed at {datetime.datetime.now()}")
[ "ibvhs = bvh1.intersect(bvh2) # for ibvh1, ibvh2, ip in ibvhs: # print ip", "for the given polints. # # Usage: # ``` # bvh1 = BVH(points1)", "self.isLeaf(): ip = self._line.intersect(bvh._line) if ip is not None: ilt = self._line.closestParam(ip) t_min,", "ilt = self._line.closestParam(ip) t_min, t_max = self._param_range it = (1.0 - ilt) *", "1] = al if al_total > 0.00001: params *= (1.0 / al_total) self._params", "is not None: ibvhs.extend(ibvh) return ibvhs else: return None return None ## Plot", "BoundingBox(cvs) x_new = np.linspace(bb.min()[0], bb.max()[0], num_points) y_new = spl(x_new) ps = np.zeros((num_points, 2))", "0], ps[:, 1], \"-\") ## Plot BVH structure. def plotBVH(self, plt, color=\"b\"): self._bvh.plotBVH(plt,", "for pi in range(len(cvs) - 1): al += dist_cvs[pi] params[pi + 1] =", "point is included in the node. def contains(self, p): return self._bb.contains(p) ## Find", "np.array(points) self._points = points self.computeParameters() self._bvh = BVH(points, self._params) ## Return points on", "for (p_min, p_max), (t_min, t_max) in zip(p_ranges, t_ranges): if t_max > t: pt", "plt, pl): ibvhs = self._bvh.intersect(pl._bvh) if ibvhs is None: return its = []", "= [] self._line = None self._createChildren(points, params) ## Return if the node is", "self._bb.contains(p) ## Find intersections with the given BVH structure. def intersect(self, bvh): if", "self._line.intersect(bvh._line) if ip is not None: ilt = self._line.closestParam(ip) t_min, t_max = self._param_range", "+ 1] points_right = points[len(points) / 2:] params_left = params[:len(points) / 2 +", "cv2 = np.random.rand(4, 2) ps1 = splinePoints(cv1) ps2 = splinePoints(cv2) pl1 = Polyline(ps1)", "(1.0 / al_total) self._params = params ## Find intersected points with the given", "# bvh1 = BVH(points1) # bvh2 = BVH(points2) # ibvhs = bvh1.intersect(bvh2) #", "None ## Plot BVH. def plotBVH(self, plt, color=\"b\", alpha=0.05): self._bb.plotBoundingBox(plt, color=color, alpha=alpha) if", "if al_total > 0.00001: params *= (1.0 / al_total) self._params = params ##", "ip, it in ibvhs: ibvh1.plotBVH(plt, color=\"r\", alpha=0.2) ibvh2.plotBVH(plt, color=\"r\", alpha=0.2) plt.plot(ip[0], ip[1], \"o\",", "params = self._params t_ranges = zip(params[:-1], params[1:]) points = self._points p_ranges = zip(points[:-1],", "showMaximize cv1 = np.random.rand(4, 2) cv2 = np.random.rand(4, 2) ps1 = splinePoints(cv1) ps2", "import UnivariateSpline spl = UnivariateSpline(cvs[:, 0], cvs[:, 1]) bb = BoundingBox(cvs) x_new =", "2 + 1] params_right = params[len(points) / 2:] self._children = [BVH(points_left, params_left, self._level", "inversetoon.util.timer import timing_func from inversetoon.np.norm import normVectors ## Simple bounding box hierarchy for", "BVH structure is used for fast intersection. def intersect(self, pl): ibvhs = self._bvh.intersect(pl._bvh)", "for bvh_ch in bvh.children(): ibvh = self_ch.intersect(bvh_ch) if ibvh is not None: ibvhs.extend(ibvh)", "self._points plt.plot(ps[:, 0], ps[:, 1], \"-\") ## Plot BVH structure. def plotBVH(self, plt,", "= zip(points[:-1], points[1:]) for (p_min, p_max), (t_min, t_max) in zip(p_ranges, t_ranges): if t_max", "ps[:, 1], \"-\") ## Plot BVH structure. def plotBVH(self, plt, color=\"b\"): self._bvh.plotBVH(plt, color=color)", "intersections: %s %s \" % (len(ibvhs), its)) for it in its: ip =", "al if al_total > 0.00001: params *= (1.0 / al_total) self._params = params", "ip = self.pointAt(it) print ip plt.plot(ip[0] + 0.001, ip[1] + 0.001, \"x\", color=\"g\")", "children in the node. 
def children(self): if self.isLeaf(): return [self] return self._children ##", "= splinePoints(cv2) pl1 = Polyline(ps1) pl2 = Polyline(ps2) pl1.plotPolyline(plt) pl2.plotPolyline(plt) pl1.plotBVH(plt, color=\"b\") pl2.plotBVH(plt,", "x_new ps[:, 1] = y_new return ps if __name__ == '__main__': import matplotlib.pyplot", "intersection. def intersect(self, pl): ibvhs = self._bvh.intersect(pl._bvh) ips = [ip for ibvh1, ibvh2,", "print ip plt.plot(ip[0] + 0.001, ip[1] + 0.001, \"x\", color=\"g\") def splinePoints(cvs, num_points=100):", "on the polyline. def points(self): return self._points ## Return point at the given", "points = np.array(points) self._points = points self.computeParameters() self._bvh = BVH(points, self._params) ## Return", "Implementation of 2D polyline. class Polyline: ## Constructor def __init__(self, points): self.create(points) ##", "## Plot intersection with BVH structure. def plotIntersection(self, plt, pl): ibvhs = self._bvh.intersect(pl._bvh)", "*= (1.0 / al_total) self._params = params ## Find intersected points with the", "%s %s \" % (len(ibvhs), its)) for it in its: ip = self.pointAt(it)", "numpy as np from inversetoon.geometry.bounding_box import BoundingBox from inversetoon.geometry.line import Line from inversetoon.util.timer", "utf-8 -*- ## @package inversetoon.geometry.polyline # # Implementation of 2D polyline. # @author", "ps[:, 1] = y_new return ps if __name__ == '__main__': import matplotlib.pyplot as", "np.random.rand(4, 2) cv2 = np.random.rand(4, 2) ps1 = splinePoints(cv1) ps2 = splinePoints(cv2) pl1", "= [] for self_ch in self.children(): for bvh_ch in bvh.children(): ibvh = self_ch.intersect(bvh_ch)", "= BVH(points, self._params) ## Return points on the polyline. def points(self): return self._points", "the given polints. # # Usage: # ``` # bvh1 = BVH(points1) #", "= al if al_total > 0.00001: params *= (1.0 / al_total) self._params =", "def _createChildren(self, points, params): if len(points) < 5: self._points = points self._params =", "## Create 2D polyline with the given points. def create(self, points): points =", "## Plot polyline. def plotPolyline(self, plt): ps = self._points plt.plot(ps[:, 0], ps[:, 1],", "np.sum(dist_cvs) params = np.zeros(len(cvs)) al = 0 for pi in range(len(cvs) - 1):", "plt.plot(ip[0], ip[1], \"o\", color=\"r\") its.append(it) plt.title(\"Num intersections: %s %s \" % (len(ibvhs), its))", "points = self._points p_ranges = zip(points[:-1], points[1:]) for (p_min, p_max), (t_min, t_max) in", "class BVH: ## Constructor def __init__(self, points, params, level=0): self._level = level self._bb", "tody # @date 2015/08/12 import numpy as np from inversetoon.geometry.bounding_box import BoundingBox from", "[np.min(params), np.max(params)] self._line = Line(self._points[0], self._points[-1]) return points_left = points[:len(points) / 2 +", "return its = [] for ibvh1, ibvh2, ip, it in ibvhs: ibvh1.plotBVH(plt, color=\"r\",", "return len(self._children) == 0 ## Return the points in the node. def points(self):", "in bvh.children(): ibvh = self_ch.intersect(bvh_ch) if ibvh is not None: ibvhs.extend(ibvh) return ibvhs", "0], cvs[:, 1]) bb = BoundingBox(cvs) x_new = np.linspace(bb.min()[0], bb.max()[0], num_points) y_new =", "self._bvh = BVH(points, self._params) ## Return points on the polyline. 
def points(self): return", "# @date 2015/08/12 import numpy as np from inversetoon.geometry.bounding_box import BoundingBox from inversetoon.geometry.line", "Constructor def __init__(self, points): self.create(points) ## Create 2D polyline with the given points.", "its.append(it) plt.title(\"Num intersections: %s %s \" % (len(ibvhs), its)) for it in its:", "0] = x_new ps[:, 1] = y_new return ps if __name__ == '__main__':", "p_min + pt * (p_max - p_min) return None ## Compute arc length", "params): if len(points) < 5: self._points = points self._params = params self._param_range =", "fast intersection. def intersect(self, pl): ibvhs = self._bvh.intersect(pl._bvh) ips = [ip for ibvh1,", "# ibvhs = bvh1.intersect(bvh2) # for ibvh1, ibvh2, ip in ibvhs: # print", "# for ibvh1, ibvh2, ip in ibvhs: # print ip # ``` class", "plt from inversetoon.plot.window import showMaximize cv1 = np.random.rand(4, 2) cv2 = np.random.rand(4, 2)", "np.linspace(bb.min()[0], bb.max()[0], num_points) y_new = spl(x_new) ps = np.zeros((num_points, 2)) ps[:, 0] =", "self._points = points self.computeParameters() self._bvh = BVH(points, self._params) ## Return points on the", "## Constructor def __init__(self, points): self.create(points) ## Create 2D polyline with the given", "t_max = self._param_range it = (1.0 - ilt) * t_min + ilt *", "zip(params[:-1], params[1:]) points = self._points p_ranges = zip(points[:-1], points[1:]) for (p_min, p_max), (t_min,", "the node. def points(self): return self._points ## Return the children in the node.", "t_max) in zip(p_ranges, t_ranges): if t_max > t: pt = (t - t_min)", "alpha=alpha) if self.isLeaf(): return for bvh in self.children(): bvh.plotBVH(plt, color) def _createChildren(self, points,", "params) ## Return if the node is leaf. def isLeaf(self): return len(self._children) ==", "BoundingBox from inversetoon.geometry.line import Line from inversetoon.util.timer import timing_func from inversetoon.np.norm import normVectors", "self.isLeaf(): return [self] return self._children ## Return true if the given point is", "color=\"b\"): self._bvh.plotBVH(plt, color=color) ## Plot intersection with BVH structure. def plotIntersection(self, plt, pl):", "self._bvh.intersect(pl._bvh) if ibvhs is None: return its = [] for ibvh1, ibvh2, ip,", "def splinePoints(cvs, num_points=100): from scipy.interpolate import UnivariateSpline spl = UnivariateSpline(cvs[:, 0], cvs[:, 1])", "= np.linspace(bb.min()[0], bb.max()[0], num_points) y_new = spl(x_new) ps = np.zeros((num_points, 2)) ps[:, 0]", "= params self._param_range = [np.min(params), np.max(params)] self._line = Line(self._points[0], self._points[-1]) return points_left =", "params_right = params[len(points) / 2:] self._children = [BVH(points_left, params_left, self._level + 1), BVH(points_right,", "ibvhs else: return None return None ## Plot BVH. def plotBVH(self, plt, color=\"b\",", "params *= (1.0 / al_total) self._params = params ## Find intersected points with", "as plt from inversetoon.plot.window import showMaximize cv1 = np.random.rand(4, 2) cv2 = np.random.rand(4,", "t_min) / (t_max - t_min) return p_min + pt * (p_max - p_min)", "ps = self._points plt.plot(ps[:, 0], ps[:, 1], \"-\") ## Plot BVH structure. 
def", "[BVH(points_left, params_left, self._level + 1), BVH(points_right, params_right, self._level + 1)] ## Implementation of", "alpha=0.05): self._bb.plotBoundingBox(plt, color=color, alpha=alpha) if self.isLeaf(): return for bvh in self.children(): bvh.plotBVH(plt, color)", "in its: ip = self.pointAt(it) print ip plt.plot(ip[0] + 0.001, ip[1] + 0.001,", "cv1 = np.random.rand(4, 2) cv2 = np.random.rand(4, 2) ps1 = splinePoints(cv1) ps2 =", "params = np.zeros(len(cvs)) al = 0 for pi in range(len(cvs) - 1): al", "class Polyline: ## Constructor def __init__(self, points): self.create(points) ## Create 2D polyline with", "__name__ == '__main__': import matplotlib.pyplot as plt from inversetoon.plot.window import showMaximize cv1 =", "= splinePoints(cv1) ps2 = splinePoints(cv2) pl1 = Polyline(ps1) pl2 = Polyline(ps2) pl1.plotPolyline(plt) pl2.plotPolyline(plt)", "Return point at the given parameter. def pointAt(self, t): params = self._params t_ranges", "in self.children(): bvh.plotBVH(plt, color) def _createChildren(self, points, params): if len(points) < 5: self._points", "= np.random.rand(4, 2) ps1 = splinePoints(cv1) ps2 = splinePoints(cv2) pl1 = Polyline(ps1) pl2", "the polyline. def points(self): return self._points ## Return point at the given parameter.", "+ 0.001, \"x\", color=\"g\") def splinePoints(cvs, num_points=100): from scipy.interpolate import UnivariateSpline spl =", "# Implementation of 2D polyline. # @author tody # @date 2015/08/12 import numpy", "for ibvh1, ibvh2, ip, it in ibvhs] return ips ## Plot polyline. def", "``` # bvh1 = BVH(points1) # bvh2 = BVH(points2) # ibvhs = bvh1.intersect(bvh2)", "level self._bb = BoundingBox(points) self._children = [] self._line = None self._createChildren(points, params) ##", "(t_min, t_max) in zip(p_ranges, t_ranges): if t_max > t: pt = (t -", "(1.0 - ilt) * t_min + ilt * t_max return [(self, bvh, ip,", "ibvh2, ip in ibvhs: # print ip # ``` class BVH: ## Constructor", "plt, color=\"b\"): self._bvh.plotBVH(plt, color=color) ## Plot intersection with BVH structure. def plotIntersection(self, plt,", "import showMaximize cv1 = np.random.rand(4, 2) cv2 = np.random.rand(4, 2) ps1 = splinePoints(cv1)", "BVH(points, self._params) ## Return points on the polyline. def points(self): return self._points ##", "## Return true if the given point is included in the node. def", "def computeParameters(self): cvs = self._points diff_cvs = cvs[1:, :] - cvs[:-1, :] dist_cvs", "2D polyline with the given points. def create(self, points): points = np.array(points) self._points", "inversetoon.plot.window import showMaximize cv1 = np.random.rand(4, 2) cv2 = np.random.rand(4, 2) ps1 =", "def isLeaf(self): return len(self._children) == 0 ## Return the points in the node.", "= self.pointAt(it) print ip plt.plot(ip[0] + 0.001, ip[1] + 0.001, \"x\", color=\"g\") def", "ibvh2.plotBVH(plt, color=\"r\", alpha=0.2) plt.plot(ip[0], ip[1], \"o\", color=\"r\") its.append(it) plt.title(\"Num intersections: %s %s \"", "ibvh1, ibvh2, ip in ibvhs: # print ip # ``` class BVH: ##", "ibvh2, ip, it in ibvhs] return ips ## Plot polyline. def plotPolyline(self, plt):", "+= dist_cvs[pi] params[pi + 1] = al if al_total > 0.00001: params *=", "p): return self._bb.contains(p) ## Find intersections with the given BVH structure. 
def intersect(self,", "ps2 = splinePoints(cv2) pl1 = Polyline(ps1) pl2 = Polyline(ps2) pl1.plotPolyline(plt) pl2.plotPolyline(plt) pl1.plotBVH(plt, color=\"b\")", "= level self._bb = BoundingBox(points) self._children = [] self._line = None self._createChildren(points, params)", "as np from inversetoon.geometry.bounding_box import BoundingBox from inversetoon.geometry.line import Line from inversetoon.util.timer import", "ibvhs: ibvh1.plotBVH(plt, color=\"r\", alpha=0.2) ibvh2.plotBVH(plt, color=\"r\", alpha=0.2) plt.plot(ip[0], ip[1], \"o\", color=\"r\") its.append(it) plt.title(\"Num", "in ibvhs: ibvh1.plotBVH(plt, color=\"r\", alpha=0.2) ibvh2.plotBVH(plt, color=\"r\", alpha=0.2) plt.plot(ip[0], ip[1], \"o\", color=\"r\") its.append(it)", "self._bb.plotBoundingBox(plt, color=color, alpha=alpha) if self.isLeaf(): return for bvh in self.children(): bvh.plotBVH(plt, color) def", "self._params t_ranges = zip(params[:-1], params[1:]) points = self._points p_ranges = zip(points[:-1], points[1:]) for", "if the given point is included in the node. def contains(self, p): return", "color=color, alpha=alpha) if self.isLeaf(): return for bvh in self.children(): bvh.plotBVH(plt, color) def _createChildren(self,", "# @author tody # @date 2015/08/12 import numpy as np from inversetoon.geometry.bounding_box import", "points_right = points[len(points) / 2:] params_left = params[:len(points) / 2 + 1] params_right", "= params ## Find intersected points with the given polyline. # # BVH", "np.random.rand(4, 2) ps1 = splinePoints(cv1) ps2 = splinePoints(cv2) pl1 = Polyline(ps1) pl2 =", "self.create(points) ## Create 2D polyline with the given points. def create(self, points): points", "bvh1 = BVH(points1) # bvh2 = BVH(points2) # ibvhs = bvh1.intersect(bvh2) # for", "is not None: ilt = self._line.closestParam(ip) t_min, t_max = self._param_range it = (1.0", "## Return if the node is leaf. def isLeaf(self): return len(self._children) == 0", "ilt) * t_min + ilt * t_max return [(self, bvh, ip, it)] else:", "t_max return [(self, bvh, ip, it)] else: ibvhs = [] for self_ch in", "with BVH structure. def plotIntersection(self, plt, pl): ibvhs = self._bvh.intersect(pl._bvh) if ibvhs is", "np.zeros((num_points, 2)) ps[:, 0] = x_new ps[:, 1] = y_new return ps if", "= Line(self._points[0], self._points[-1]) return points_left = points[:len(points) / 2 + 1] points_right =", "points): points = np.array(points) self._points = points self.computeParameters() self._bvh = BVH(points, self._params) ##", "/ 2:] self._children = [BVH(points_left, params_left, self._level + 1), BVH(points_right, params_right, self._level +", "Polyline: ## Constructor def __init__(self, points): self.create(points) ## Create 2D polyline with the", "+ pt * (p_max - p_min) return None ## Compute arc length parameters.", "given BVH structure. def intersect(self, bvh): if self._bb.intersects(bvh._bb): if bvh.isLeaf() and self.isLeaf(): ip", "plotIntersection(self, plt, pl): ibvhs = self._bvh.intersect(pl._bvh) if ibvhs is None: return its =", "= self._points p_ranges = zip(points[:-1], points[1:]) for (p_min, p_max), (t_min, t_max) in zip(p_ranges,", "its = [] for ibvh1, ibvh2, ip, it in ibvhs: ibvh1.plotBVH(plt, color=\"r\", alpha=0.2)", "t): params = self._params t_ranges = zip(params[:-1], params[1:]) points = self._points p_ranges =", "splinePoints(cv2) pl1 = Polyline(ps1) pl2 = Polyline(ps2) pl1.plotPolyline(plt) pl2.plotPolyline(plt) pl1.plotBVH(plt, color=\"b\") pl2.plotBVH(plt, color=\"g\")", "1], \"-\") ## Plot BVH structure. 
def plotBVH(self, plt, color=\"b\"): self._bvh.plotBVH(plt, color=color) ##", "self._line = Line(self._points[0], self._points[-1]) return points_left = points[:len(points) / 2 + 1] points_right", "create(self, points): points = np.array(points) self._points = points self.computeParameters() self._bvh = BVH(points, self._params)", "UnivariateSpline(cvs[:, 0], cvs[:, 1]) bb = BoundingBox(cvs) x_new = np.linspace(bb.min()[0], bb.max()[0], num_points) y_new", "self._bvh.intersect(pl._bvh) ips = [ip for ibvh1, ibvh2, ip, it in ibvhs] return ips", "node. def children(self): if self.isLeaf(): return [self] return self._children ## Return true if", "params[pi + 1] = al if al_total > 0.00001: params *= (1.0 /", "= self._line.closestParam(ip) t_min, t_max = self._param_range it = (1.0 - ilt) * t_min", "2) ps1 = splinePoints(cv1) ps2 = splinePoints(cv2) pl1 = Polyline(ps1) pl2 = Polyline(ps2)", "Plot BVH structure. def plotBVH(self, plt, color=\"b\"): self._bvh.plotBVH(plt, color=color) ## Plot intersection with", "bb = BoundingBox(cvs) x_new = np.linspace(bb.min()[0], bb.max()[0], num_points) y_new = spl(x_new) ps =", "2) cv2 = np.random.rand(4, 2) ps1 = splinePoints(cv1) ps2 = splinePoints(cv2) pl1 =", "node is leaf. def isLeaf(self): return len(self._children) == 0 ## Return the points", "- ilt) * t_min + ilt * t_max return [(self, bvh, ip, it)]", "params_left, self._level + 1), BVH(points_right, params_right, self._level + 1)] ## Implementation of 2D", "it in its: ip = self.pointAt(it) print ip plt.plot(ip[0] + 0.001, ip[1] +", "self.children(): for bvh_ch in bvh.children(): ibvh = self_ch.intersect(bvh_ch) if ibvh is not None:", "np from inversetoon.geometry.bounding_box import BoundingBox from inversetoon.geometry.line import Line from inversetoon.util.timer import timing_func", "BVH structure. def plotBVH(self, plt, color=\"b\"): self._bvh.plotBVH(plt, color=color) ## Plot intersection with BVH", "def intersect(self, pl): ibvhs = self._bvh.intersect(pl._bvh) ips = [ip for ibvh1, ibvh2, ip,", "= bvh1.intersect(bvh2) # for ibvh1, ibvh2, ip in ibvhs: # print ip #", "ip is not None: ilt = self._line.closestParam(ip) t_min, t_max = self._param_range it =", "def __init__(self, points): self.create(points) ## Create 2D polyline with the given points. def", "color=color) ## Plot intersection with BVH structure. def plotIntersection(self, plt, pl): ibvhs =", "# Usage: # ``` # bvh1 = BVH(points1) # bvh2 = BVH(points2) #", "in self.children(): for bvh_ch in bvh.children(): ibvh = self_ch.intersect(bvh_ch) if ibvh is not", "node. def points(self): return self._points ## Return the children in the node. def", "children(self): if self.isLeaf(): return [self] return self._children ## Return true if the given", "the given point is included in the node. def contains(self, p): return self._bb.contains(p)", "alpha=0.2) plt.plot(ip[0], ip[1], \"o\", color=\"r\") its.append(it) plt.title(\"Num intersections: %s %s \" % (len(ibvhs),", "Simple bounding box hierarchy for the given polints. # # Usage: # ```", "self._bb = BoundingBox(points) self._children = [] self._line = None self._createChildren(points, params) ## Return", "= np.random.rand(4, 2) cv2 = np.random.rand(4, 2) ps1 = splinePoints(cv1) ps2 = splinePoints(cv2)", "points_left = points[:len(points) / 2 + 1] points_right = points[len(points) / 2:] params_left", "polyline. def points(self): return self._points ## Return point at the given parameter. 
def", "normVectors(diff_cvs) al_total = np.sum(dist_cvs) params = np.zeros(len(cvs)) al = 0 for pi in", "= y_new return ps if __name__ == '__main__': import matplotlib.pyplot as plt from", "points with the given polyline. # # BVH structure is used for fast", "bvh): if self._bb.intersects(bvh._bb): if bvh.isLeaf() and self.isLeaf(): ip = self._line.intersect(bvh._line) if ip is", "__init__(self, points): self.create(points) ## Create 2D polyline with the given points. def create(self,", "x_new = np.linspace(bb.min()[0], bb.max()[0], num_points) y_new = spl(x_new) ps = np.zeros((num_points, 2)) ps[:,", "## Compute arc length parameters. def computeParameters(self): cvs = self._points diff_cvs = cvs[1:,", "its: ip = self.pointAt(it) print ip plt.plot(ip[0] + 0.001, ip[1] + 0.001, \"x\",", "len(points) < 5: self._points = points self._params = params self._param_range = [np.min(params), np.max(params)]", "= self._points diff_cvs = cvs[1:, :] - cvs[:-1, :] dist_cvs = normVectors(diff_cvs) al_total", "+ 1), BVH(points_right, params_right, self._level + 1)] ## Implementation of 2D polyline. class", "None: ibvhs.extend(ibvh) return ibvhs else: return None return None ## Plot BVH. def", "params, level=0): self._level = level self._bb = BoundingBox(points) self._children = [] self._line =", "= cvs[1:, :] - cvs[:-1, :] dist_cvs = normVectors(diff_cvs) al_total = np.sum(dist_cvs) params", "= self._bvh.intersect(pl._bvh) if ibvhs is None: return its = [] for ibvh1, ibvh2,", "color=\"r\") its.append(it) plt.title(\"Num intersections: %s %s \" % (len(ibvhs), its)) for it in", "/ 2 + 1] params_right = params[len(points) / 2:] self._children = [BVH(points_left, params_left,", "ibvhs: # print ip # ``` class BVH: ## Constructor def __init__(self, points,", "# # Implementation of 2D polyline. # @author tody # @date 2015/08/12 import", "= spl(x_new) ps = np.zeros((num_points, 2)) ps[:, 0] = x_new ps[:, 1] =", "## Constructor def __init__(self, points, params, level=0): self._level = level self._bb = BoundingBox(points)", "p_min) return None ## Compute arc length parameters. def computeParameters(self): cvs = self._points", "\"x\", color=\"g\") def splinePoints(cvs, num_points=100): from scipy.interpolate import UnivariateSpline spl = UnivariateSpline(cvs[:, 0],", "plotBVH(self, plt, color=\"b\", alpha=0.05): self._bb.plotBoundingBox(plt, color=color, alpha=alpha) if self.isLeaf(): return for bvh in", "dist_cvs[pi] params[pi + 1] = al if al_total > 0.00001: params *= (1.0", "- t_min) / (t_max - t_min) return p_min + pt * (p_max -", "= points[len(points) / 2:] params_left = params[:len(points) / 2 + 1] params_right =", "given polints. # # Usage: # ``` # bvh1 = BVH(points1) # bvh2", "intersected points with the given polyline. # # BVH structure is used for", "import normVectors ## Simple bounding box hierarchy for the given polints. # #", "self._params = params ## Find intersected points with the given polyline. # #", "def points(self): return self._points ## Return the children in the node. def children(self):", "t_ranges): if t_max > t: pt = (t - t_min) / (t_max -", "else: ibvhs = [] for self_ch in self.children(): for bvh_ch in bvh.children(): ibvh", "``` class BVH: ## Constructor def __init__(self, points, params, level=0): self._level = level", "plt.plot(ip[0] + 0.001, ip[1] + 0.001, \"x\", color=\"g\") def splinePoints(cvs, num_points=100): from scipy.interpolate", "== 0 ## Return the points in the node. def points(self): return self._points", "polyline. 
def plotPolyline(self, plt): ps = self._points plt.plot(ps[:, 0], ps[:, 1], \"-\") ##", "bounding box hierarchy for the given polints. # # Usage: # ``` #", "y_new return ps if __name__ == '__main__': import matplotlib.pyplot as plt from inversetoon.plot.window", "t_min) return p_min + pt * (p_max - p_min) return None ## Compute", "= points[:len(points) / 2 + 1] points_right = points[len(points) / 2:] params_left =", "np.zeros(len(cvs)) al = 0 for pi in range(len(cvs) - 1): al += dist_cvs[pi]", "return None ## Plot BVH. def plotBVH(self, plt, color=\"b\", alpha=0.05): self._bb.plotBoundingBox(plt, color=color, alpha=alpha)", "in the node. def children(self): if self.isLeaf(): return [self] return self._children ## Return", "Line from inversetoon.util.timer import timing_func from inversetoon.np.norm import normVectors ## Simple bounding box", "leaf. def isLeaf(self): return len(self._children) == 0 ## Return the points in the", "[(self, bvh, ip, it)] else: ibvhs = [] for self_ch in self.children(): for", "params self._param_range = [np.min(params), np.max(params)] self._line = Line(self._points[0], self._points[-1]) return points_left = points[:len(points)", "= (1.0 - ilt) * t_min + ilt * t_max return [(self, bvh,", "from inversetoon.geometry.line import Line from inversetoon.util.timer import timing_func from inversetoon.np.norm import normVectors ##", "< 5: self._points = points self._params = params self._param_range = [np.min(params), np.max(params)] self._line", "= BoundingBox(points) self._children = [] self._line = None self._createChildren(points, params) ## Return if", "self_ch in self.children(): for bvh_ch in bvh.children(): ibvh = self_ch.intersect(bvh_ch) if ibvh is", "the children in the node. def children(self): if self.isLeaf(): return [self] return self._children", "diff_cvs = cvs[1:, :] - cvs[:-1, :] dist_cvs = normVectors(diff_cvs) al_total = np.sum(dist_cvs)", "self._bvh.plotBVH(plt, color=color) ## Plot intersection with BVH structure. def plotIntersection(self, plt, pl): ibvhs", "plt.title(\"Num intersections: %s %s \" % (len(ibvhs), its)) for it in its: ip", "= 0 for pi in range(len(cvs) - 1): al += dist_cvs[pi] params[pi +", "= Polyline(ps1) pl2 = Polyline(ps2) pl1.plotPolyline(plt) pl2.plotPolyline(plt) pl1.plotBVH(plt, color=\"b\") pl2.plotBVH(plt, color=\"g\") pl1.plotIntersection(plt, pl2)", "normVectors ## Simple bounding box hierarchy for the given polints. # # Usage:", "return [(self, bvh, ip, it)] else: ibvhs = [] for self_ch in self.children():", "not None: ibvhs.extend(ibvh) return ibvhs else: return None return None ## Plot BVH.", "/ (t_max - t_min) return p_min + pt * (p_max - p_min) return", "## Plot BVH structure. def plotBVH(self, plt, color=\"b\"): self._bvh.plotBVH(plt, color=color) ## Plot intersection", "the node. def children(self): if self.isLeaf(): return [self] return self._children ## Return true", "ibvh1, ibvh2, ip, it in ibvhs] return ips ## Plot polyline. def plotPolyline(self,", "points, params, level=0): self._level = level self._bb = BoundingBox(points) self._children = [] self._line", "return ips ## Plot polyline. 
def plotPolyline(self, plt): ps = self._points plt.plot(ps[:, 0],", "self._points p_ranges = zip(points[:-1], points[1:]) for (p_min, p_max), (t_min, t_max) in zip(p_ranges, t_ranges):", "* t_max return [(self, bvh, ip, it)] else: ibvhs = [] for self_ch", "al += dist_cvs[pi] params[pi + 1] = al if al_total > 0.00001: params", "p_ranges = zip(points[:-1], points[1:]) for (p_min, p_max), (t_min, t_max) in zip(p_ranges, t_ranges): if", "the given points. def create(self, points): points = np.array(points) self._points = points self.computeParameters()", "_createChildren(self, points, params): if len(points) < 5: self._points = points self._params = params", "self._points ## Return the children in the node. def children(self): if self.isLeaf(): return", "the node is leaf. def isLeaf(self): return len(self._children) == 0 ## Return the", "ibvhs] return ips ## Plot polyline. def plotPolyline(self, plt): ps = self._points plt.plot(ps[:,", "from scipy.interpolate import UnivariateSpline spl = UnivariateSpline(cvs[:, 0], cvs[:, 1]) bb = BoundingBox(cvs)", "Line(self._points[0], self._points[-1]) return points_left = points[:len(points) / 2 + 1] points_right = points[len(points)", "# BVH structure is used for fast intersection. def intersect(self, pl): ibvhs =", "bvh2 = BVH(points2) # ibvhs = bvh1.intersect(bvh2) # for ibvh1, ibvh2, ip in", "intersection with BVH structure. def plotIntersection(self, plt, pl): ibvhs = self._bvh.intersect(pl._bvh) if ibvhs", "self_ch.intersect(bvh_ch) if ibvh is not None: ibvhs.extend(ibvh) return ibvhs else: return None return", "Find intersections with the given BVH structure. def intersect(self, bvh): if self._bb.intersects(bvh._bb): if", "of 2D polyline. # @author tody # @date 2015/08/12 import numpy as np", "ip[1] + 0.001, \"x\", color=\"g\") def splinePoints(cvs, num_points=100): from scipy.interpolate import UnivariateSpline spl", "splinePoints(cv1) ps2 = splinePoints(cv2) pl1 = Polyline(ps1) pl2 = Polyline(ps2) pl1.plotPolyline(plt) pl2.plotPolyline(plt) pl1.plotBVH(plt,", "return points_left = points[:len(points) / 2 + 1] points_right = points[len(points) / 2:]", "def plotBVH(self, plt, color=\"b\", alpha=0.05): self._bb.plotBoundingBox(plt, color=color, alpha=alpha) if self.isLeaf(): return for bvh", "structure. def plotBVH(self, plt, color=\"b\"): self._bvh.plotBVH(plt, color=color) ## Plot intersection with BVH structure.", "ibvh is not None: ibvhs.extend(ibvh) return ibvhs else: return None return None ##", "None self._createChildren(points, params) ## Return if the node is leaf. def isLeaf(self): return", "if self.isLeaf(): return for bvh in self.children(): bvh.plotBVH(plt, color) def _createChildren(self, points, params):", "# print ip # ``` class BVH: ## Constructor def __init__(self, points, params,", "ips ## Plot polyline. def plotPolyline(self, plt): ps = self._points plt.plot(ps[:, 0], ps[:,", "t: pt = (t - t_min) / (t_max - t_min) return p_min +", "hierarchy for the given polints. # # Usage: # ``` # bvh1 =", "ip, it)] else: ibvhs = [] for self_ch in self.children(): for bvh_ch in", "= normVectors(diff_cvs) al_total = np.sum(dist_cvs) params = np.zeros(len(cvs)) al = 0 for pi", "# # BVH structure is used for fast intersection. def intersect(self, pl): ibvhs", "points): self.create(points) ## Create 2D polyline with the given points. def create(self, points):", "if the node is leaf. def isLeaf(self): return len(self._children) == 0 ## Return", "= (t - t_min) / (t_max - t_min) return p_min + pt *", "return None ## Compute arc length parameters. 
def computeParameters(self): cvs = self._points diff_cvs", "points on the polyline. def points(self): return self._points ## Return point at the", "@author tody # @date 2015/08/12 import numpy as np from inversetoon.geometry.bounding_box import BoundingBox", "= zip(params[:-1], params[1:]) points = self._points p_ranges = zip(points[:-1], points[1:]) for (p_min, p_max),", "and self.isLeaf(): ip = self._line.intersect(bvh._line) if ip is not None: ilt = self._line.closestParam(ip)", "= BVH(points2) # ibvhs = bvh1.intersect(bvh2) # for ibvh1, ibvh2, ip in ibvhs:", "params[:len(points) / 2 + 1] params_right = params[len(points) / 2:] self._children = [BVH(points_left,", "(len(ibvhs), its)) for it in its: ip = self.pointAt(it) print ip plt.plot(ip[0] +", "given points. def create(self, points): points = np.array(points) self._points = points self.computeParameters() self._bvh", "## Implementation of 2D polyline. class Polyline: ## Constructor def __init__(self, points): self.create(points)", "arc length parameters. def computeParameters(self): cvs = self._points diff_cvs = cvs[1:, :] -", "pt * (p_max - p_min) return None ## Compute arc length parameters. def", "color=\"r\", alpha=0.2) plt.plot(ip[0], ip[1], \"o\", color=\"r\") its.append(it) plt.title(\"Num intersections: %s %s \" %", "\" % (len(ibvhs), its)) for it in its: ip = self.pointAt(it) print ip", "box hierarchy for the given polints. # # Usage: # ``` # bvh1", "ip in ibvhs: # print ip # ``` class BVH: ## Constructor def", "ibvh1, ibvh2, ip, it in ibvhs: ibvh1.plotBVH(plt, color=\"r\", alpha=0.2) ibvh2.plotBVH(plt, color=\"r\", alpha=0.2) plt.plot(ip[0],", "if ip is not None: ilt = self._line.closestParam(ip) t_min, t_max = self._param_range it", "point at the given parameter. def pointAt(self, t): params = self._params t_ranges =", "[ip for ibvh1, ibvh2, ip, it in ibvhs] return ips ## Plot polyline.", "% (len(ibvhs), its)) for it in its: ip = self.pointAt(it) print ip plt.plot(ip[0]", "2 + 1] points_right = points[len(points) / 2:] params_left = params[:len(points) / 2", "ps[:, 0] = x_new ps[:, 1] = y_new return ps if __name__ ==", "zip(points[:-1], points[1:]) for (p_min, p_max), (t_min, t_max) in zip(p_ranges, t_ranges): if t_max >", "its)) for it in its: ip = self.pointAt(it) print ip plt.plot(ip[0] + 0.001,", "BVH structure. def intersect(self, bvh): if self._bb.intersects(bvh._bb): if bvh.isLeaf() and self.isLeaf(): ip =", "t_min + ilt * t_max return [(self, bvh, ip, it)] else: ibvhs =", "used for fast intersection. def intersect(self, pl): ibvhs = self._bvh.intersect(pl._bvh) ips = [ip", "cvs[1:, :] - cvs[:-1, :] dist_cvs = normVectors(diff_cvs) al_total = np.sum(dist_cvs) params =", "= [ip for ibvh1, ibvh2, ip, it in ibvhs] return ips ## Plot", "= [BVH(points_left, params_left, self._level + 1), BVH(points_right, params_right, self._level + 1)] ## Implementation", "- p_min) return None ## Compute arc length parameters. def computeParameters(self): cvs =", "1] points_right = points[len(points) / 2:] params_left = params[:len(points) / 2 + 1]", "return ibvhs else: return None return None ## Plot BVH. def plotBVH(self, plt,", "## Find intersections with the given BVH structure. def intersect(self, bvh): if self._bb.intersects(bvh._bb):", "given point is included in the node. 
def contains(self, p): return self._bb.contains(p) ##", "= points self._params = params self._param_range = [np.min(params), np.max(params)] self._line = Line(self._points[0], self._points[-1])", "in ibvhs: # print ip # ``` class BVH: ## Constructor def __init__(self,", "0.001, \"x\", color=\"g\") def splinePoints(cvs, num_points=100): from scipy.interpolate import UnivariateSpline spl = UnivariateSpline(cvs[:,", "None: ilt = self._line.closestParam(ip) t_min, t_max = self._param_range it = (1.0 - ilt)", "def contains(self, p): return self._bb.contains(p) ## Find intersections with the given BVH structure.", "bvh, ip, it)] else: ibvhs = [] for self_ch in self.children(): for bvh_ch", "= params[:len(points) / 2 + 1] params_right = params[len(points) / 2:] self._children =", "__init__(self, points, params, level=0): self._level = level self._bb = BoundingBox(points) self._children = []", "polyline with the given points. def create(self, points): points = np.array(points) self._points =", "with the given polyline. # # BVH structure is used for fast intersection.", "BVH(points2) # ibvhs = bvh1.intersect(bvh2) # for ibvh1, ibvh2, ip in ibvhs: #", "al_total = np.sum(dist_cvs) params = np.zeros(len(cvs)) al = 0 for pi in range(len(cvs)", "self._points[-1]) return points_left = points[:len(points) / 2 + 1] points_right = points[len(points) /", "points in the node. def points(self): return self._points ## Return the children in", "included in the node. def contains(self, p): return self._bb.contains(p) ## Find intersections with", "ip = self._line.intersect(bvh._line) if ip is not None: ilt = self._line.closestParam(ip) t_min, t_max", "params_left = params[:len(points) / 2 + 1] params_right = params[len(points) / 2:] self._children", "al_total) self._params = params ## Find intersected points with the given polyline. #", "points self.computeParameters() self._bvh = BVH(points, self._params) ## Return points on the polyline. def", "== '__main__': import matplotlib.pyplot as plt from inversetoon.plot.window import showMaximize cv1 = np.random.rand(4,", "(t_max - t_min) return p_min + pt * (p_max - p_min) return None", "spl(x_new) ps = np.zeros((num_points, 2)) ps[:, 0] = x_new ps[:, 1] = y_new", "None ## Compute arc length parameters. def computeParameters(self): cvs = self._points diff_cvs =", "ibvh2, ip, it in ibvhs: ibvh1.plotBVH(plt, color=\"r\", alpha=0.2) ibvh2.plotBVH(plt, color=\"r\", alpha=0.2) plt.plot(ip[0], ip[1],", "return self._bb.contains(p) ## Find intersections with the given BVH structure. def intersect(self, bvh):", "for it in its: ip = self.pointAt(it) print ip plt.plot(ip[0] + 0.001, ip[1]", "Polyline(ps1) pl2 = Polyline(ps2) pl1.plotPolyline(plt) pl2.plotPolyline(plt) pl1.plotBVH(plt, color=\"b\") pl2.plotBVH(plt, color=\"g\") pl1.plotIntersection(plt, pl2) showMaximize()", "self._param_range = [np.min(params), np.max(params)] self._line = Line(self._points[0], self._points[-1]) return points_left = points[:len(points) /", "+ ilt * t_max return [(self, bvh, ip, it)] else: ibvhs = []", "for ibvh1, ibvh2, ip, it in ibvhs: ibvh1.plotBVH(plt, color=\"r\", alpha=0.2) ibvh2.plotBVH(plt, color=\"r\", alpha=0.2)", "self._line.closestParam(ip) t_min, t_max = self._param_range it = (1.0 - ilt) * t_min +", "the node. def contains(self, p): return self._bb.contains(p) ## Find intersections with the given", "# ``` class BVH: ## Constructor def __init__(self, points, params, level=0): self._level =", "parameters. 
def computeParameters(self): cvs = self._points diff_cvs = cvs[1:, :] - cvs[:-1, :]", "color=\"r\", alpha=0.2) ibvh2.plotBVH(plt, color=\"r\", alpha=0.2) plt.plot(ip[0], ip[1], \"o\", color=\"r\") its.append(it) plt.title(\"Num intersections: %s", "## Return points on the polyline. def points(self): return self._points ## Return point", "ibvhs.extend(ibvh) return ibvhs else: return None return None ## Plot BVH. def plotBVH(self,", "import Line from inversetoon.util.timer import timing_func from inversetoon.np.norm import normVectors ## Simple bounding", "(p_max - p_min) return None ## Compute arc length parameters. def computeParameters(self): cvs", "return ps if __name__ == '__main__': import matplotlib.pyplot as plt from inversetoon.plot.window import", "self._bb.intersects(bvh._bb): if bvh.isLeaf() and self.isLeaf(): ip = self._line.intersect(bvh._line) if ip is not None:", "if t_max > t: pt = (t - t_min) / (t_max - t_min)", "not None: ilt = self._line.closestParam(ip) t_min, t_max = self._param_range it = (1.0 -", "[] self._line = None self._createChildren(points, params) ## Return if the node is leaf.", "= params[len(points) / 2:] self._children = [BVH(points_left, params_left, self._level + 1), BVH(points_right, params_right,", "alpha=0.2) ibvh2.plotBVH(plt, color=\"r\", alpha=0.2) plt.plot(ip[0], ip[1], \"o\", color=\"r\") its.append(it) plt.title(\"Num intersections: %s %s", "pl1 = Polyline(ps1) pl2 = Polyline(ps2) pl1.plotPolyline(plt) pl2.plotPolyline(plt) pl1.plotBVH(plt, color=\"b\") pl2.plotBVH(plt, color=\"g\") pl1.plotIntersection(plt,", "# # Usage: # ``` # bvh1 = BVH(points1) # bvh2 = BVH(points2)", "= [np.min(params), np.max(params)] self._line = Line(self._points[0], self._points[-1]) return points_left = points[:len(points) / 2", "* t_min + ilt * t_max return [(self, bvh, ip, it)] else: ibvhs", "self._createChildren(points, params) ## Return if the node is leaf. def isLeaf(self): return len(self._children)", "range(len(cvs) - 1): al += dist_cvs[pi] params[pi + 1] = al if al_total", "pl): ibvhs = self._bvh.intersect(pl._bvh) if ibvhs is None: return its = [] for", "t_min, t_max = self._param_range it = (1.0 - ilt) * t_min + ilt", "## Return point at the given parameter. def pointAt(self, t): params = self._params", "return self._points ## Return the children in the node. def children(self): if self.isLeaf():", "> t: pt = (t - t_min) / (t_max - t_min) return p_min", "ibvhs = self._bvh.intersect(pl._bvh) ips = [ip for ibvh1, ibvh2, ip, it in ibvhs]", "for fast intersection. def intersect(self, pl): ibvhs = self._bvh.intersect(pl._bvh) ips = [ip for", "[] for self_ch in self.children(): for bvh_ch in bvh.children(): ibvh = self_ch.intersect(bvh_ch) if", "of 2D polyline. class Polyline: ## Constructor def __init__(self, points): self.create(points) ## Create", "num_points=100): from scipy.interpolate import UnivariateSpline spl = UnivariateSpline(cvs[:, 0], cvs[:, 1]) bb =", "true if the given point is included in the node. def contains(self, p):", "self._params = params self._param_range = [np.min(params), np.max(params)] self._line = Line(self._points[0], self._points[-1]) return points_left", ":] dist_cvs = normVectors(diff_cvs) al_total = np.sum(dist_cvs) params = np.zeros(len(cvs)) al = 0", "p_max), (t_min, t_max) in zip(p_ranges, t_ranges): if t_max > t: pt = (t", "polyline. 
class Polyline: ## Constructor def __init__(self, points): self.create(points) ## Create 2D polyline", "+ 0.001, ip[1] + 0.001, \"x\", color=\"g\") def splinePoints(cvs, num_points=100): from scipy.interpolate import", "def create(self, points): points = np.array(points) self._points = points self.computeParameters() self._bvh = BVH(points,", "## Return the children in the node. def children(self): if self.isLeaf(): return [self]", "contains(self, p): return self._bb.contains(p) ## Find intersections with the given BVH structure. def", "1)] ## Implementation of 2D polyline. class Polyline: ## Constructor def __init__(self, points):", "%s \" % (len(ibvhs), its)) for it in its: ip = self.pointAt(it) print", "np.max(params)] self._line = Line(self._points[0], self._points[-1]) return points_left = points[:len(points) / 2 + 1]", "polyline. # @author tody # @date 2015/08/12 import numpy as np from inversetoon.geometry.bounding_box", "in zip(p_ranges, t_ranges): if t_max > t: pt = (t - t_min) /", "= UnivariateSpline(cvs[:, 0], cvs[:, 1]) bb = BoundingBox(cvs) x_new = np.linspace(bb.min()[0], bb.max()[0], num_points)", "0.00001: params *= (1.0 / al_total) self._params = params ## Find intersected points", "timing_func from inversetoon.np.norm import normVectors ## Simple bounding box hierarchy for the given", "it = (1.0 - ilt) * t_min + ilt * t_max return [(self,", "Create 2D polyline with the given points. def create(self, points): points = np.array(points)", "ibvhs is None: return its = [] for ibvh1, ibvh2, ip, it in", "# ``` # bvh1 = BVH(points1) # bvh2 = BVH(points2) # ibvhs =", "intersections with the given BVH structure. def intersect(self, bvh): if self._bb.intersects(bvh._bb): if bvh.isLeaf()", "Return points on the polyline. def points(self): return self._points ## Return point at", "given parameter. def pointAt(self, t): params = self._params t_ranges = zip(params[:-1], params[1:]) points", "plt): ps = self._points plt.plot(ps[:, 0], ps[:, 1], \"-\") ## Plot BVH structure.", "len(self._children) == 0 ## Return the points in the node. def points(self): return", "None: return its = [] for ibvh1, ibvh2, ip, it in ibvhs: ibvh1.plotBVH(plt,", "Usage: # ``` # bvh1 = BVH(points1) # bvh2 = BVH(points2) # ibvhs", "(p_min, p_max), (t_min, t_max) in zip(p_ranges, t_ranges): if t_max > t: pt =", "is included in the node. def contains(self, p): return self._bb.contains(p) ## Find intersections", "dist_cvs = normVectors(diff_cvs) al_total = np.sum(dist_cvs) params = np.zeros(len(cvs)) al = 0 for", "is used for fast intersection. def intersect(self, pl): ibvhs = self._bvh.intersect(pl._bvh) ips =", "it in ibvhs] return ips ## Plot polyline. def plotPolyline(self, plt): ps =", "points, params): if len(points) < 5: self._points = points self._params = params self._param_range", "structure. def plotIntersection(self, plt, pl): ibvhs = self._bvh.intersect(pl._bvh) if ibvhs is None: return", "= self._params t_ranges = zip(params[:-1], params[1:]) points = self._points p_ranges = zip(points[:-1], points[1:])", "ip plt.plot(ip[0] + 0.001, ip[1] + 0.001, \"x\", color=\"g\") def splinePoints(cvs, num_points=100): from", "coding: utf-8 -*- ## @package inversetoon.geometry.polyline # # Implementation of 2D polyline. 
#", "params[1:]) points = self._points p_ranges = zip(points[:-1], points[1:]) for (p_min, p_max), (t_min, t_max)", "bvh in self.children(): bvh.plotBVH(plt, color) def _createChildren(self, points, params): if len(points) < 5:", ":] - cvs[:-1, :] dist_cvs = normVectors(diff_cvs) al_total = np.sum(dist_cvs) params = np.zeros(len(cvs))", "from inversetoon.np.norm import normVectors ## Simple bounding box hierarchy for the given polints.", "else: return None return None ## Plot BVH. def plotBVH(self, plt, color=\"b\", alpha=0.05):", "self._children = [BVH(points_left, params_left, self._level + 1), BVH(points_right, params_right, self._level + 1)] ##", "polyline. # # BVH structure is used for fast intersection. def intersect(self, pl):", "cvs[:, 1]) bb = BoundingBox(cvs) x_new = np.linspace(bb.min()[0], bb.max()[0], num_points) y_new = spl(x_new)", "## Return the points in the node. def points(self): return self._points ## Return", "self._children ## Return true if the given point is included in the node.", "= np.sum(dist_cvs) params = np.zeros(len(cvs)) al = 0 for pi in range(len(cvs) -", "the points in the node. def points(self): return self._points ## Return the children", "1] params_right = params[len(points) / 2:] self._children = [BVH(points_left, params_left, self._level + 1),", "def points(self): return self._points ## Return point at the given parameter. def pointAt(self,", "params ## Find intersected points with the given polyline. # # BVH structure", "ibvhs = [] for self_ch in self.children(): for bvh_ch in bvh.children(): ibvh =", "return self._points ## Return point at the given parameter. def pointAt(self, t): params", "if self.isLeaf(): return [self] return self._children ## Return true if the given point", "'__main__': import matplotlib.pyplot as plt from inversetoon.plot.window import showMaximize cv1 = np.random.rand(4, 2)", "pt = (t - t_min) / (t_max - t_min) return p_min + pt", "splinePoints(cvs, num_points=100): from scipy.interpolate import UnivariateSpline spl = UnivariateSpline(cvs[:, 0], cvs[:, 1]) bb", "plotPolyline(self, plt): ps = self._points plt.plot(ps[:, 0], ps[:, 1], \"-\") ## Plot BVH", "from inversetoon.plot.window import showMaximize cv1 = np.random.rand(4, 2) cv2 = np.random.rand(4, 2) ps1", "= points self.computeParameters() self._bvh = BVH(points, self._params) ## Return points on the polyline.", "import timing_func from inversetoon.np.norm import normVectors ## Simple bounding box hierarchy for the", "bvh1.intersect(bvh2) # for ibvh1, ibvh2, ip in ibvhs: # print ip # ```", "color=\"b\", alpha=0.05): self._bb.plotBoundingBox(plt, color=color, alpha=alpha) if self.isLeaf(): return for bvh in self.children(): bvh.plotBVH(plt,", "+ 1] params_right = params[len(points) / 2:] self._children = [BVH(points_left, params_left, self._level +", "pl): ibvhs = self._bvh.intersect(pl._bvh) ips = [ip for ibvh1, ibvh2, ip, it in", "import matplotlib.pyplot as plt from inversetoon.plot.window import showMaximize cv1 = np.random.rand(4, 2) cv2", "ilt * t_max return [(self, bvh, ip, it)] else: ibvhs = [] for", "node. def contains(self, p): return self._bb.contains(p) ## Find intersections with the given BVH", "zip(p_ranges, t_ranges): if t_max > t: pt = (t - t_min) / (t_max", "= self._points plt.plot(ps[:, 0], ps[:, 1], \"-\") ## Plot BVH structure. 
def plotBVH(self,", "def __init__(self, points, params, level=0): self._level = level self._bb = BoundingBox(points) self._children =", "= self._bvh.intersect(pl._bvh) ips = [ip for ibvh1, ibvh2, ip, it in ibvhs] return", "# bvh2 = BVH(points2) # ibvhs = bvh1.intersect(bvh2) # for ibvh1, ibvh2, ip", "print ip # ``` class BVH: ## Constructor def __init__(self, points, params, level=0):", "structure. def intersect(self, bvh): if self._bb.intersects(bvh._bb): if bvh.isLeaf() and self.isLeaf(): ip = self._line.intersect(bvh._line)", "+ 1)] ## Implementation of 2D polyline. class Polyline: ## Constructor def __init__(self,", "Plot polyline. def plotPolyline(self, plt): ps = self._points plt.plot(ps[:, 0], ps[:, 1], \"-\")", "return self._children ## Return true if the given point is included in the", "## @package inversetoon.geometry.polyline # # Implementation of 2D polyline. # @author tody #", "isLeaf(self): return len(self._children) == 0 ## Return the points in the node. def", "BVH(points1) # bvh2 = BVH(points2) # ibvhs = bvh1.intersect(bvh2) # for ibvh1, ibvh2,", "2D polyline. # @author tody # @date 2015/08/12 import numpy as np from", "-*- ## @package inversetoon.geometry.polyline # # Implementation of 2D polyline. # @author tody", "self.computeParameters() self._bvh = BVH(points, self._params) ## Return points on the polyline. def points(self):", "in the node. def points(self): return self._points ## Return the children in the", "is leaf. def isLeaf(self): return len(self._children) == 0 ## Return the points in", "return for bvh in self.children(): bvh.plotBVH(plt, color) def _createChildren(self, points, params): if len(points)", "al = 0 for pi in range(len(cvs) - 1): al += dist_cvs[pi] params[pi", "intersect(self, pl): ibvhs = self._bvh.intersect(pl._bvh) ips = [ip for ibvh1, ibvh2, ip, it", "0 ## Return the points in the node. def points(self): return self._points ##", "inversetoon.geometry.line import Line from inversetoon.util.timer import timing_func from inversetoon.np.norm import normVectors ## Simple", "Return the children in the node. def children(self): if self.isLeaf(): return [self] return", "with the given points. def create(self, points): points = np.array(points) self._points = points", "from inversetoon.geometry.bounding_box import BoundingBox from inversetoon.geometry.line import Line from inversetoon.util.timer import timing_func from", "computeParameters(self): cvs = self._points diff_cvs = cvs[1:, :] - cvs[:-1, :] dist_cvs =", "+ 1] = al if al_total > 0.00001: params *= (1.0 / al_total)", "self.isLeaf(): return for bvh in self.children(): bvh.plotBVH(plt, color) def _createChildren(self, points, params): if", "pointAt(self, t): params = self._params t_ranges = zip(params[:-1], params[1:]) points = self._points p_ranges", "self._points ## Return point at the given parameter. def pointAt(self, t): params =", "in ibvhs] return ips ## Plot polyline. 
def plotPolyline(self, plt): ps = self._points", "def pointAt(self, t): params = self._params t_ranges = zip(params[:-1], params[1:]) points = self._points", "scipy.interpolate import UnivariateSpline spl = UnivariateSpline(cvs[:, 0], cvs[:, 1]) bb = BoundingBox(cvs) x_new", "if __name__ == '__main__': import matplotlib.pyplot as plt from inversetoon.plot.window import showMaximize cv1", "1): al += dist_cvs[pi] params[pi + 1] = al if al_total > 0.00001:", "points[:len(points) / 2 + 1] points_right = points[len(points) / 2:] params_left = params[:len(points)", "[self] return self._children ## Return true if the given point is included in", "@package inversetoon.geometry.polyline # # Implementation of 2D polyline. # @author tody # @date", "it in ibvhs: ibvh1.plotBVH(plt, color=\"r\", alpha=0.2) ibvh2.plotBVH(plt, color=\"r\", alpha=0.2) plt.plot(ip[0], ip[1], \"o\", color=\"r\")", "2:] self._children = [BVH(points_left, params_left, self._level + 1), BVH(points_right, params_right, self._level + 1)]", "params[len(points) / 2:] self._children = [BVH(points_left, params_left, self._level + 1), BVH(points_right, params_right, self._level", "ip[1], \"o\", color=\"r\") its.append(it) plt.title(\"Num intersections: %s %s \" % (len(ibvhs), its)) for", "plt.plot(ps[:, 0], ps[:, 1], \"-\") ## Plot BVH structure. def plotBVH(self, plt, color=\"b\"):", "ps if __name__ == '__main__': import matplotlib.pyplot as plt from inversetoon.plot.window import showMaximize", "= [] for ibvh1, ibvh2, ip, it in ibvhs: ibvh1.plotBVH(plt, color=\"r\", alpha=0.2) ibvh2.plotBVH(plt,", "## Find intersected points with the given polyline. # # BVH structure is", "## Plot BVH. def plotBVH(self, plt, color=\"b\", alpha=0.05): self._bb.plotBoundingBox(plt, color=color, alpha=alpha) if self.isLeaf():", "pi in range(len(cvs) - 1): al += dist_cvs[pi] params[pi + 1] = al", "def children(self): if self.isLeaf(): return [self] return self._children ## Return true if the", "in the node. def contains(self, p): return self._bb.contains(p) ## Find intersections with the", "= x_new ps[:, 1] = y_new return ps if __name__ == '__main__': import", "import numpy as np from inversetoon.geometry.bounding_box import BoundingBox from inversetoon.geometry.line import Line from", "for ibvh1, ibvh2, ip in ibvhs: # print ip # ``` class BVH:", "self.children(): bvh.plotBVH(plt, color) def _createChildren(self, points, params): if len(points) < 5: self._points =", "bvh.isLeaf() and self.isLeaf(): ip = self._line.intersect(bvh._line) if ip is not None: ilt =", "points[len(points) / 2:] params_left = params[:len(points) / 2 + 1] params_right = params[len(points)", "if len(points) < 5: self._points = points self._params = params self._param_range = [np.min(params),", "/ al_total) self._params = params ## Find intersected points with the given polyline.", "bb.max()[0], num_points) y_new = spl(x_new) ps = np.zeros((num_points, 2)) ps[:, 0] = x_new", "= np.array(points) self._points = points self.computeParameters() self._bvh = BVH(points, self._params) ## Return points", "ibvh1.plotBVH(plt, color=\"r\", alpha=0.2) ibvh2.plotBVH(plt, color=\"r\", alpha=0.2) plt.plot(ip[0], ip[1], \"o\", color=\"r\") its.append(it) plt.title(\"Num intersections:", "params_right, self._level + 1)] ## Implementation of 2D polyline. class Polyline: ## Constructor", "Compute arc length parameters. 
def computeParameters(self): cvs = self._points diff_cvs = cvs[1:, :]", "it)] else: ibvhs = [] for self_ch in self.children(): for bvh_ch in bvh.children():", "= self._line.intersect(bvh._line) if ip is not None: ilt = self._line.closestParam(ip) t_min, t_max =", "1), BVH(points_right, params_right, self._level + 1)] ## Implementation of 2D polyline. class Polyline:", "color) def _createChildren(self, points, params): if len(points) < 5: self._points = points self._params", "the given polyline. # # BVH structure is used for fast intersection. def", "-*- coding: utf-8 -*- ## @package inversetoon.geometry.polyline # # Implementation of 2D polyline.", "plotBVH(self, plt, color=\"b\"): self._bvh.plotBVH(plt, color=color) ## Plot intersection with BVH structure. def plotIntersection(self,", "ps1 = splinePoints(cv1) ps2 = splinePoints(cv2) pl1 = Polyline(ps1) pl2 = Polyline(ps2) pl1.plotPolyline(plt)", "cvs[:-1, :] dist_cvs = normVectors(diff_cvs) al_total = np.sum(dist_cvs) params = np.zeros(len(cvs)) al =", "t_ranges = zip(params[:-1], params[1:]) points = self._points p_ranges = zip(points[:-1], points[1:]) for (p_min,", "color=\"g\") def splinePoints(cvs, num_points=100): from scipy.interpolate import UnivariateSpline spl = UnivariateSpline(cvs[:, 0], cvs[:,", "inversetoon.np.norm import normVectors ## Simple bounding box hierarchy for the given polints. #", "return p_min + pt * (p_max - p_min) return None ## Compute arc", "if ibvh is not None: ibvhs.extend(ibvh) return ibvhs else: return None return None", "= self_ch.intersect(bvh_ch) if ibvh is not None: ibvhs.extend(ibvh) return ibvhs else: return None", "the given parameter. def pointAt(self, t): params = self._params t_ranges = zip(params[:-1], params[1:])", "ips = [ip for ibvh1, ibvh2, ip, it in ibvhs] return ips ##", "points(self): return self._points ## Return the children in the node. def children(self): if", "BVH(points_right, params_right, self._level + 1)] ## Implementation of 2D polyline. class Polyline: ##", "if bvh.isLeaf() and self.isLeaf(): ip = self._line.intersect(bvh._line) if ip is not None: ilt", "ps = np.zeros((num_points, 2)) ps[:, 0] = x_new ps[:, 1] = y_new return", "the given BVH structure. def intersect(self, bvh): if self._bb.intersects(bvh._bb): if bvh.isLeaf() and self.isLeaf():", "\"o\", color=\"r\") its.append(it) plt.title(\"Num intersections: %s %s \" % (len(ibvhs), its)) for it", "0 for pi in range(len(cvs) - 1): al += dist_cvs[pi] params[pi + 1]", "ibvh = self_ch.intersect(bvh_ch) if ibvh is not None: ibvhs.extend(ibvh) return ibvhs else: return", "points. def create(self, points): points = np.array(points) self._points = points self.computeParameters() self._bvh =", "self._level + 1), BVH(points_right, params_right, self._level + 1)] ## Implementation of 2D polyline.", "structure is used for fast intersection. def intersect(self, pl): ibvhs = self._bvh.intersect(pl._bvh) ips", "points self._params = params self._param_range = [np.min(params), np.max(params)] self._line = Line(self._points[0], self._points[-1]) return", "ip, it in ibvhs] return ips ## Plot polyline. def plotPolyline(self, plt): ps", "0.001, ip[1] + 0.001, \"x\", color=\"g\") def splinePoints(cvs, num_points=100): from scipy.interpolate import UnivariateSpline", "cvs = self._points diff_cvs = cvs[1:, :] - cvs[:-1, :] dist_cvs = normVectors(diff_cvs)", "parameter. 
def pointAt(self, t): params = self._params t_ranges = zip(params[:-1], params[1:]) points =", "if self._bb.intersects(bvh._bb): if bvh.isLeaf() and self.isLeaf(): ip = self._line.intersect(bvh._line) if ip is not", "self.pointAt(it) print ip plt.plot(ip[0] + 0.001, ip[1] + 0.001, \"x\", color=\"g\") def splinePoints(cvs,", "t_max > t: pt = (t - t_min) / (t_max - t_min) return", "length parameters. def computeParameters(self): cvs = self._points diff_cvs = cvs[1:, :] - cvs[:-1,", "Implementation of 2D polyline. # @author tody # @date 2015/08/12 import numpy as", "> 0.00001: params *= (1.0 / al_total) self._params = params ## Find intersected", "spl = UnivariateSpline(cvs[:, 0], cvs[:, 1]) bb = BoundingBox(cvs) x_new = np.linspace(bb.min()[0], bb.max()[0],", "intersect(self, bvh): if self._bb.intersects(bvh._bb): if bvh.isLeaf() and self.isLeaf(): ip = self._line.intersect(bvh._line) if ip", "/ 2:] params_left = params[:len(points) / 2 + 1] params_right = params[len(points) /", "= BVH(points1) # bvh2 = BVH(points2) # ibvhs = bvh1.intersect(bvh2) # for ibvh1,", "2D polyline. class Polyline: ## Constructor def __init__(self, points): self.create(points) ## Create 2D", "for bvh in self.children(): bvh.plotBVH(plt, color) def _createChildren(self, points, params): if len(points) <", "2:] params_left = params[:len(points) / 2 + 1] params_right = params[len(points) / 2:]", "Return the points in the node. def points(self): return self._points ## Return the", "Plot intersection with BVH structure. def plotIntersection(self, plt, pl): ibvhs = self._bvh.intersect(pl._bvh) if", "= self._param_range it = (1.0 - ilt) * t_min + ilt * t_max", "(t - t_min) / (t_max - t_min) return p_min + pt * (p_max", "Return true if the given point is included in the node. def contains(self,", "points[1:]) for (p_min, p_max), (t_min, t_max) in zip(p_ranges, t_ranges): if t_max > t:", "polints. # # Usage: # ``` # bvh1 = BVH(points1) # bvh2 =", "inversetoon.geometry.bounding_box import BoundingBox from inversetoon.geometry.line import Line from inversetoon.util.timer import timing_func from inversetoon.np.norm", "* (p_max - p_min) return None ## Compute arc length parameters. def computeParameters(self):", "self._line = None self._createChildren(points, params) ## Return if the node is leaf. def", "return [self] return self._children ## Return true if the given point is included", "given polyline. # # BVH structure is used for fast intersection. def intersect(self,", "is None: return its = [] for ibvh1, ibvh2, ip, it in ibvhs:", "[] for ibvh1, ibvh2, ip, it in ibvhs: ibvh1.plotBVH(plt, color=\"r\", alpha=0.2) ibvh2.plotBVH(plt, color=\"r\",", "if ibvhs is None: return its = [] for ibvh1, ibvh2, ip, it", "1] = y_new return ps if __name__ == '__main__': import matplotlib.pyplot as plt", "at the given parameter. def pointAt(self, t): params = self._params t_ranges = zip(params[:-1],", "= np.zeros((num_points, 2)) ps[:, 0] = x_new ps[:, 1] = y_new return ps", "for self_ch in self.children(): for bvh_ch in bvh.children(): ibvh = self_ch.intersect(bvh_ch) if ibvh", "2)) ps[:, 0] = x_new ps[:, 1] = y_new return ps if __name__", "BoundingBox(points) self._children = [] self._line = None self._createChildren(points, params) ## Return if the", "/ 2 + 1] points_right = points[len(points) / 2:] params_left = params[:len(points) /", "ip # ``` class BVH: ## Constructor def __init__(self, points, params, level=0): self._level", "Return if the node is leaf. 
def isLeaf(self): return len(self._children) == 0 ##", "- t_min) return p_min + pt * (p_max - p_min) return None ##", "self._points = points self._params = params self._param_range = [np.min(params), np.max(params)] self._line = Line(self._points[0],", "self._param_range it = (1.0 - ilt) * t_min + ilt * t_max return", "self._children = [] self._line = None self._createChildren(points, params) ## Return if the node", "level=0): self._level = level self._bb = BoundingBox(points) self._children = [] self._line = None", "- 1): al += dist_cvs[pi] params[pi + 1] = al if al_total >", "bvh_ch in bvh.children(): ibvh = self_ch.intersect(bvh_ch) if ibvh is not None: ibvhs.extend(ibvh) return", "Plot BVH. def plotBVH(self, plt, color=\"b\", alpha=0.05): self._bb.plotBoundingBox(plt, color=color, alpha=alpha) if self.isLeaf(): return", "- cvs[:-1, :] dist_cvs = normVectors(diff_cvs) al_total = np.sum(dist_cvs) params = np.zeros(len(cvs)) al", "def plotPolyline(self, plt): ps = self._points plt.plot(ps[:, 0], ps[:, 1], \"-\") ## Plot", "num_points) y_new = spl(x_new) ps = np.zeros((num_points, 2)) ps[:, 0] = x_new ps[:,", "# -*- coding: utf-8 -*- ## @package inversetoon.geometry.polyline # # Implementation of 2D", "= None self._createChildren(points, params) ## Return if the node is leaf. def isLeaf(self):", "Constructor def __init__(self, points, params, level=0): self._level = level self._bb = BoundingBox(points) self._children", "return None return None ## Plot BVH. def plotBVH(self, plt, color=\"b\", alpha=0.05): self._bb.plotBoundingBox(plt,", "2015/08/12 import numpy as np from inversetoon.geometry.bounding_box import BoundingBox from inversetoon.geometry.line import Line", "None return None ## Plot BVH. def plotBVH(self, plt, color=\"b\", alpha=0.05): self._bb.plotBoundingBox(plt, color=color,", "self._level = level self._bb = BoundingBox(points) self._children = [] self._line = None self._createChildren(points,", "BVH: ## Constructor def __init__(self, points, params, level=0): self._level = level self._bb =", "plt, color=\"b\", alpha=0.05): self._bb.plotBoundingBox(plt, color=color, alpha=alpha) if self.isLeaf(): return for bvh in self.children():", "al_total > 0.00001: params *= (1.0 / al_total) self._params = params ## Find", "5: self._points = points self._params = params self._param_range = [np.min(params), np.max(params)] self._line =", "matplotlib.pyplot as plt from inversetoon.plot.window import showMaximize cv1 = np.random.rand(4, 2) cv2 =", "= BoundingBox(cvs) x_new = np.linspace(bb.min()[0], bb.max()[0], num_points) y_new = spl(x_new) ps = np.zeros((num_points,", "def intersect(self, bvh): if self._bb.intersects(bvh._bb): if bvh.isLeaf() and self.isLeaf(): ip = self._line.intersect(bvh._line) if", "BVH. def plotBVH(self, plt, color=\"b\", alpha=0.05): self._bb.plotBoundingBox(plt, color=color, alpha=alpha) if self.isLeaf(): return for", "def plotBVH(self, plt, color=\"b\"): self._bvh.plotBVH(plt, color=color) ## Plot intersection with BVH structure. def", "BVH structure. def plotIntersection(self, plt, pl): ibvhs = self._bvh.intersect(pl._bvh) if ibvhs is None:", "def plotIntersection(self, plt, pl): ibvhs = self._bvh.intersect(pl._bvh) if ibvhs is None: return its", "= np.zeros(len(cvs)) al = 0 for pi in range(len(cvs) - 1): al +=", "\"-\") ## Plot BVH structure. 
def plotBVH(self, plt, color=\"b\"): self._bvh.plotBVH(plt, color=color) ## Plot", "1]) bb = BoundingBox(cvs) x_new = np.linspace(bb.min()[0], bb.max()[0], num_points) y_new = spl(x_new) ps", "from inversetoon.util.timer import timing_func from inversetoon.np.norm import normVectors ## Simple bounding box hierarchy", "bvh.children(): ibvh = self_ch.intersect(bvh_ch) if ibvh is not None: ibvhs.extend(ibvh) return ibvhs else:", "@date 2015/08/12 import numpy as np from inversetoon.geometry.bounding_box import BoundingBox from inversetoon.geometry.line import", "with the given BVH structure. def intersect(self, bvh): if self._bb.intersects(bvh._bb): if bvh.isLeaf() and", "inversetoon.geometry.polyline # # Implementation of 2D polyline. # @author tody # @date 2015/08/12", "y_new = spl(x_new) ps = np.zeros((num_points, 2)) ps[:, 0] = x_new ps[:, 1]", "bvh.plotBVH(plt, color) def _createChildren(self, points, params): if len(points) < 5: self._points = points", "import BoundingBox from inversetoon.geometry.line import Line from inversetoon.util.timer import timing_func from inversetoon.np.norm import", "self._params) ## Return points on the polyline. def points(self): return self._points ## Return", "ibvhs = self._bvh.intersect(pl._bvh) if ibvhs is None: return its = [] for ibvh1,", "UnivariateSpline spl = UnivariateSpline(cvs[:, 0], cvs[:, 1]) bb = BoundingBox(cvs) x_new = np.linspace(bb.min()[0],", "## Simple bounding box hierarchy for the given polints. # # Usage: #", "in range(len(cvs) - 1): al += dist_cvs[pi] params[pi + 1] = al if", "self._points diff_cvs = cvs[1:, :] - cvs[:-1, :] dist_cvs = normVectors(diff_cvs) al_total =", "Find intersected points with the given polyline. # # BVH structure is used", "self._level + 1)] ## Implementation of 2D polyline. class Polyline: ## Constructor def", "points(self): return self._points ## Return point at the given parameter. def pointAt(self, t):" ]
[ "self.regiao = RegiaoFactory.create(id=1) def test_perform_create(self): data = { 'nome': 'Gotham', 'regiao': self.regiao.id }", "data = { 'nome': 'Gotham', 'regiao': self.regiao.id } response = self.unath_client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code,", "APITestCase, APIClient from .fixture import RegiaoFactory, CidadeFactory User = get_user_model() class CidadeViewSetTests(APITestCase): def", "self.client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham-city') def test_partial_update(self): cidade =", "status from rest_framework.authtoken.models import Token from rest_framework.test import APITestCase, APIClient from .fixture import", "self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 1) response = self.client.delete(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) response =", "self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) def test_destroy(self): CidadeFactory.create(id=15, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code,", "rest_framework import status from rest_framework.authtoken.models import Token from rest_framework.test import APITestCase, APIClient from", "data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) def test_destroy(self): CidadeFactory.create(id=15, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[15]))", "args=[10])) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], cidade.nome) def test_update(self): cidade = CidadeFactory.create(id=21, regiao=self.regiao) data =", "data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham')", "def test_retrieve(self): cidade = CidadeFactory.create(id=10, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response", "test_perform_create(self): data = { 'nome': 'Gotham', 'regiao': self.regiao.id } response = self.unath_client.post(reverse('cidade-list'), data=data)", "self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.put(reverse('cidade-detail', args=[21]),", "= APIClient() self.client = APIClient() token, _ = Token.objects.get_or_create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}') self.regiao =", "= self.client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham') def test_list(self): CidadeFactory.create_batch(5, regiao=self.regiao)", "args=[21]), data=data) 
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome'])", "test_retrieve(self): cidade = CidadeFactory.create(id=10, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response =", "= Token.objects.get_or_create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}') self.regiao = RegiaoFactory.create(id=1) def test_perform_create(self): data = { 'nome':", "{ 'nome': 'Gotham', 'regiao': self.regiao.id } response = self.unath_client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response", "self.user = User.objects.create_user( username='bruce', email='<EMAIL>', password='<PASSWORD>' ) self.anon_user = User.objects.create_user( username='jane', email='<EMAIL>', password='<PASSWORD>'", "_ = Token.objects.get_or_create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}') self.regiao = RegiaoFactory.create(id=1) def test_perform_create(self): data = {", "self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) def test_destroy(self):", "status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) def test_destroy(self): CidadeFactory.create(id=15, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "from rest_framework import status from rest_framework.authtoken.models import Token from rest_framework.test import APITestCase, APIClient", "City'} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.patch(reverse('cidade-detail',", "= CidadeFactory.create(id=22, regiao=self.regiao) data = {'nome': 'Gotham City'} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.patch(reverse('cidade-detail',", "self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 5) def test_retrieve(self): cidade = CidadeFactory.create(id=10, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail',", "status.HTTP_200_OK) self.assertEqual(response.data['nome'], cidade.nome) def test_update(self): cidade = CidadeFactory.create(id=21, regiao=self.regiao) data = {'nome': 'Gotham", "= self.unath_client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK)", "response = self.client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham-city') def test_partial_update(self):", "email='<EMAIL>', password='<PASSWORD>' ) self.unath_client = APIClient() self.client = APIClient() token, _ = 
Token.objects.get_or_create(user=self.user)", "cidade.nome) def test_update(self): cidade = CidadeFactory.create(id=21, regiao=self.regiao) data = {'nome': 'Gotham City', 'regiao':", "self.anon_user = User.objects.create_user( username='jane', email='<EMAIL>', password='<PASSWORD>' ) self.unath_client = APIClient() self.client = APIClient()", "username='bruce', email='<EMAIL>', password='<PASSWORD>' ) self.anon_user = User.objects.create_user( username='jane', email='<EMAIL>', password='<PASSWORD>' ) self.unath_client =", "status.HTTP_401_UNAUTHORIZED) response = self.client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham-city') def", "self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham-city') def test_partial_update(self): cidade = CidadeFactory.create(id=22, regiao=self.regiao) data", "self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham') def test_list(self): CidadeFactory.create_batch(5, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "CidadeFactory.create_batch(5, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data),", "import reverse from django.contrib.auth import get_user_model from rest_framework import status from rest_framework.authtoken.models import", "Token.objects.get_or_create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}') self.regiao = RegiaoFactory.create(id=1) def test_perform_create(self): data = { 'nome': 'Gotham',", "response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code,", "data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham-city') def test_partial_update(self): cidade = CidadeFactory.create(id=22, regiao=self.regiao)", "get_user_model() class CidadeViewSetTests(APITestCase): def setUp(self): self.user = User.objects.create_user( username='bruce', email='<EMAIL>', password='<PASSWORD>' ) self.anon_user", "self.client = APIClient() token, _ = Token.objects.get_or_create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}') self.regiao = RegiaoFactory.create(id=1) def", "= APIClient() token, _ = Token.objects.get_or_create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}') self.regiao = RegiaoFactory.create(id=1) def test_perform_create(self):", "self.unath_client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], cidade.nome) def", "regiao=self.regiao) data = {'nome': 'Gotham City'} 
self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data)", "self.assertTrue(len(response.data), 5) def test_retrieve(self): cidade = CidadeFactory.create(id=10, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code,", "test_list(self): CidadeFactory.create_batch(5, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK)", "= self.client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], cidade.nome) def test_update(self): cidade = CidadeFactory.create(id=21, regiao=self.regiao)", "self.client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], cidade.nome) def test_update(self): cidade = CidadeFactory.create(id=21, regiao=self.regiao) data", "email='<EMAIL>', password='<PASSWORD>' ) self.anon_user = User.objects.create_user( username='jane', email='<EMAIL>', password='<PASSWORD>' ) self.unath_client = APIClient()", ") self.anon_user = User.objects.create_user( username='jane', email='<EMAIL>', password='<PASSWORD>' ) self.unath_client = APIClient() self.client =", "from .fixture import RegiaoFactory, CidadeFactory User = get_user_model() class CidadeViewSetTests(APITestCase): def setUp(self): self.user", "data['nome']) response = self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.patch(reverse('cidade-detail', args=[22]), data=data)", "import RegiaoFactory, CidadeFactory User = get_user_model() class CidadeViewSetTests(APITestCase): def setUp(self): self.user = User.objects.create_user(", "self.regiao.id } response = self.unath_client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code,", "City', 'regiao': self.regiao.id} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response", "= get_user_model() class CidadeViewSetTests(APITestCase): def setUp(self): self.user = User.objects.create_user( username='bruce', email='<EMAIL>', password='<PASSWORD>' )", "response = self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code,", "response = self.client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham') def test_list(self): CidadeFactory.create_batch(5,", "status.HTTP_401_UNAUTHORIZED) response = self.client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham') def 
test_list(self):", "APIClient() self.client = APIClient() token, _ = Token.objects.get_or_create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}') self.regiao = RegiaoFactory.create(id=1)", "response = self.client.delete(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), 0)", "'Gotham City', 'regiao': self.regiao.id} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "password='<PASSWORD>' ) self.unath_client = APIClient() self.client = APIClient() token, _ = Token.objects.get_or_create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token", "= {'nome': 'Gotham City', 'regiao': self.regiao.id} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data)", "Token from rest_framework.test import APITestCase, APIClient from .fixture import RegiaoFactory, CidadeFactory User =", "self.assertTrue(len(response.data), 1) response = self.client.delete(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK)", "self.assertEqual(response.data['slug'], 'gotham') def test_list(self): CidadeFactory.create_batch(5, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response =", "self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham-city') def test_partial_update(self): cidade = CidadeFactory.create(id=22, regiao=self.regiao) data = {'nome':", "response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 1) response = self.client.delete(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "cidade = CidadeFactory.create(id=21, regiao=self.regiao) data = {'nome': 'Gotham City', 'regiao': self.regiao.id} self.assertNotEqual(cidade.nome, data['nome'])", "def test_destroy(self): CidadeFactory.create(id=15, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list'))", "= {'nome': 'Gotham City'} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 5) def test_retrieve(self): cidade = CidadeFactory.create(id=10,", "self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.patch(reverse('cidade-detail', args=[22]),", "args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) 
self.assertEqual(response.data['nome'], data['nome']) def test_destroy(self): CidadeFactory.create(id=15, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail',", "status.HTTP_401_UNAUTHORIZED) response = self.client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) def test_destroy(self): CidadeFactory.create(id=15,", "self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], cidade.nome) def test_update(self): cidade", "regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data),", "data = {'nome': 'Gotham City', 'regiao': self.regiao.id} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.put(reverse('cidade-detail', args=[21]),", "self.unath_client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 5) def test_retrieve(self): cidade", "args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham-city') def test_partial_update(self): cidade = CidadeFactory.create(id=22,", "data = {'nome': 'Gotham City'} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code,", "= self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 5) def test_retrieve(self): cidade = CidadeFactory.create(id=10, regiao=self.regiao) response", "from rest_framework.authtoken.models import Token from rest_framework.test import APITestCase, APIClient from .fixture import RegiaoFactory,", "self.unath_client.get(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 1) response =", "CidadeFactory.create(id=21, regiao=self.regiao) data = {'nome': 'Gotham City', 'regiao': self.regiao.id} self.assertNotEqual(cidade.nome, data['nome']) response =", "5) def test_retrieve(self): cidade = CidadeFactory.create(id=10, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "import Token from rest_framework.test import APITestCase, APIClient from .fixture import RegiaoFactory, CidadeFactory User", "status.HTTP_200_OK) self.assertTrue(len(response.data), 1) response = self.client.delete(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code,", "= self.client.put(reverse('cidade-detail', args=[21]), data=data) 
self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham-city') def test_partial_update(self): cidade", "data['nome']) response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.put(reverse('cidade-detail', args=[21]), data=data)", "= self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 1) response = self.client.delete(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) response", "'regiao': self.regiao.id} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response =", "self.regiao.id} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.put(reverse('cidade-detail',", "CidadeFactory.create(id=22, regiao=self.regiao) data = {'nome': 'Gotham City'} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.patch(reverse('cidade-detail', args=[22]),", "'regiao': self.regiao.id } response = self.unath_client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.post(reverse('cidade-list'), data=data)", "CidadeFactory User = get_user_model() class CidadeViewSetTests(APITestCase): def setUp(self): self.user = User.objects.create_user( username='bruce', email='<EMAIL>',", "= self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK)", "self.unath_client = APIClient() self.client = APIClient() token, _ = Token.objects.get_or_create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}') self.regiao", "data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'],", "self.client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham') def test_list(self): CidadeFactory.create_batch(5, regiao=self.regiao) response", "self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham-city')", "'gotham') def test_list(self): CidadeFactory.create_batch(5, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list'))", "self.client.patch(reverse('cidade-detail', 
args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) def test_destroy(self): CidadeFactory.create(id=15, regiao=self.regiao) response =", ") self.unath_client = APIClient() self.client = APIClient() token, _ = Token.objects.get_or_create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}')", "status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], cidade.nome) def test_update(self): cidade =", "= User.objects.create_user( username='jane', email='<EMAIL>', password='<PASSWORD>' ) self.unath_client = APIClient() self.client = APIClient() token,", "{token.key}') self.regiao = RegiaoFactory.create(id=1) def test_perform_create(self): data = { 'nome': 'Gotham', 'regiao': self.regiao.id", "} response = self.unath_client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_200_OK)", "self.unath_client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'],", "def test_list(self): CidadeFactory.create_batch(5, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code,", "data=data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham') def test_list(self): CidadeFactory.create_batch(5, regiao=self.regiao) response =", "args=[10])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], cidade.nome) def test_update(self):", "test_update(self): cidade = CidadeFactory.create(id=21, regiao=self.regiao) data = {'nome': 'Gotham City', 'regiao': self.regiao.id} self.assertNotEqual(cidade.nome,", "{'nome': 'Gotham City', 'regiao': self.regiao.id} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code,", "self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 1) response = self.client.delete(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) response = self.client.get(reverse('cidade-list'))", "status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 1) response = 
self.client.delete(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code,", "data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome']) def", "from rest_framework.test import APITestCase, APIClient from .fixture import RegiaoFactory, CidadeFactory User = get_user_model()", "import get_user_model from rest_framework import status from rest_framework.authtoken.models import Token from rest_framework.test import", "CidadeFactory.create(id=15, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK)", "import APITestCase, APIClient from .fixture import RegiaoFactory, CidadeFactory User = get_user_model() class CidadeViewSetTests(APITestCase):", "django.urls import reverse from django.contrib.auth import get_user_model from rest_framework import status from rest_framework.authtoken.models", "self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 1) response = self.client.delete(reverse('cidade-detail', args=[15]))", "self.unath_client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.put(reverse('cidade-detail', args=[21]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'],", "response = self.client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], cidade.nome) def test_update(self): cidade = CidadeFactory.create(id=21,", "{'nome': 'Gotham City'} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response", "'Gotham City'} self.assertNotEqual(cidade.nome, data['nome']) response = self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response =", "self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.data['nome'], data['nome']) self.assertEqual(response.data['slug'], 'gotham') def test_list(self): CidadeFactory.create_batch(5, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-list'))", "django.contrib.auth import get_user_model from rest_framework import status from rest_framework.authtoken.models import Token from rest_framework.test", "= RegiaoFactory.create(id=1) def test_perform_create(self): data = { 'nome': 'Gotham', 'regiao': self.regiao.id } response", "test_destroy(self): CidadeFactory.create(id=15, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[15])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code,", "APIClient() token, _ = Token.objects.get_or_create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token 
{token.key}') self.regiao = RegiaoFactory.create(id=1) def test_perform_create(self): data", "response = self.unath_client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 5) def", "cidade = CidadeFactory.create(id=10, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[10])) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.get(reverse('cidade-detail',", "self.client.get(reverse('cidade-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(len(response.data), 5) def test_retrieve(self): cidade = CidadeFactory.create(id=10, regiao=self.regiao) response =", "User.objects.create_user( username='bruce', email='<EMAIL>', password='<PASSWORD>' ) self.anon_user = User.objects.create_user( username='jane', email='<EMAIL>', password='<PASSWORD>' ) self.unath_client", "self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'],", "status.HTTP_200_OK) self.assertTrue(len(response.data), 5) def test_retrieve(self): cidade = CidadeFactory.create(id=10, regiao=self.regiao) response = self.unath_client.get(reverse('cidade-detail', args=[10]))", "'nome': 'Gotham', 'regiao': self.regiao.id } response = self.unath_client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response =", "self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], cidade.nome) def test_update(self): cidade = CidadeFactory.create(id=21, regiao=self.regiao) data = {'nome':", "self.assertEqual(response.data['slug'], 'gotham-city') def test_partial_update(self): cidade = CidadeFactory.create(id=22, regiao=self.regiao) data = {'nome': 'Gotham City'}", "test_partial_update(self): cidade = CidadeFactory.create(id=22, regiao=self.regiao) data = {'nome': 'Gotham City'} self.assertNotEqual(cidade.nome, data['nome']) response", "from django.contrib.auth import get_user_model from rest_framework import status from rest_framework.authtoken.models import Token from", "User.objects.create_user( username='jane', email='<EMAIL>', password='<PASSWORD>' ) self.unath_client = APIClient() self.client = APIClient() token, _", "= self.unath_client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.post(reverse('cidade-list'), data=data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.data['nome'], data['nome'])", "import status from rest_framework.authtoken.models import Token from rest_framework.test import APITestCase, APIClient from .fixture", "args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) response = self.client.patch(reverse('cidade-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['nome'], data['nome'])", "self.assertEqual(response.data['nome'], data['nome']) def test_destroy(self): CidadeFactory.create(id=15, regiao=self.regiao) response = 
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIClient

from .fixture import RegiaoFactory, CidadeFactory

User = get_user_model()


class CidadeViewSetTests(APITestCase):
    def setUp(self):
        self.user = User.objects.create_user(
            username='bruce',
            email='<EMAIL>',
            password='<PASSWORD>'
        )
        self.anon_user = User.objects.create_user(
            username='jane',
            email='<EMAIL>',
            password='<PASSWORD>'
        )
        self.unath_client = APIClient()
        self.client = APIClient()
        token, _ = Token.objects.get_or_create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}')
        self.regiao = RegiaoFactory.create(id=1)

    def test_perform_create(self):
        data = {
            'nome': 'Gotham',
            'regiao': self.regiao.id
        }
        response = self.unath_client.post(reverse('cidade-list'), data=data)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        response = self.client.post(reverse('cidade-list'), data=data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['nome'], data['nome'])
        self.assertEqual(response.data['slug'], 'gotham')

    def test_list(self):
        CidadeFactory.create_batch(5, regiao=self.regiao)
        response = self.unath_client.get(reverse('cidade-list'))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        response = self.client.get(reverse('cidade-list'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # assertEqual, not assertTrue: assertTrue(x, y) treats y as a failure message
        self.assertEqual(len(response.data), 5)

    def test_retrieve(self):
        cidade = CidadeFactory.create(id=10, regiao=self.regiao)
        response = self.unath_client.get(reverse('cidade-detail', args=[10]))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        response = self.client.get(reverse('cidade-detail', args=[10]))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['nome'], cidade.nome)

    def test_update(self):
        cidade = CidadeFactory.create(id=21, regiao=self.regiao)
        data = {'nome': 'Gotham City', 'regiao': self.regiao.id}
        self.assertNotEqual(cidade.nome, data['nome'])
        response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        response = self.client.put(reverse('cidade-detail', args=[21]), data=data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['nome'], data['nome'])
        self.assertEqual(response.data['slug'], 'gotham-city')

    def test_partial_update(self):
        cidade = CidadeFactory.create(id=22, regiao=self.regiao)
        data = {'nome': 'Gotham City'}
        self.assertNotEqual(cidade.nome, data['nome'])
        response = self.client.patch(reverse('cidade-detail', args=[22]), data=data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['nome'], data['nome'])

    def test_destroy(self):
        CidadeFactory.create(id=15, regiao=self.regiao)
        response = self.unath_client.get(reverse('cidade-detail', args=[15]))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        response = self.client.get(reverse('cidade-list'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        response = self.client.delete(reverse('cidade-detail', args=[15]))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        response = self.client.get(reverse('cidade-list'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 0)
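The .fixture module imported above is not part of this dump; a minimal sketch of what its factory_boy factories could plausibly look like follows. The Regiao/Cidade field names and the .models import path are assumptions, not from the source.

import factory

from .models import Regiao, Cidade  # hypothetical import path


class RegiaoFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = Regiao

    nome = factory.Sequence(lambda n: f'Regiao {n}')  # assumed field


class CidadeFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = Cidade

    nome = factory.Sequence(lambda n: f'Cidade {n}')  # assumed field
    regiao = factory.SubFactory(RegiaoFactory)  # matches regiao=... usage in the tests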
[ "si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag => 0): kaburi_hoten_3 = 0 else: kaburi_hoten_3 = print(zentai) print(kaburi_zentai) print(kaburi_hoten_1)", "else: kaburi_hoten_3 = print(zentai) print(kaburi_zentai) print(kaburi_hoten_1) print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for i in range(0,int(s),1):", "def bbb(a,b): if(a>b): return (a-b+1)**2 return(b-a+1)**2 def sisumi(a,b): if(a>b): return (a-b+1) def abs(a):", "return (a-b+1) def abs(a): if(a>0): return a else: return -a s = input()", "input() s1 = [input().split() for i in range(int(s[0]))] si = [] for i", "if(a>b): return (a-b+1)**2 return(b-a+1)**2 def sisumi(a,b): if(a>b): return (a-b+1) def abs(a): if(a>0): return", "bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2 #kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) -", "for i in range(0,int(s),1): si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si) for i in range(0,int(s),1): zentai = bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2])", "= print(zentai) print(kaburi_zentai) print(kaburi_hoten_1) print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for i in range(0,int(s),1): # aaa", "small(a, b): if(a > b): return b return a def bbb(a,b): if(a>b): return", "= [] for i in range(0,int(s),1): si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si) for i in range(0,int(s),1): zentai", "[input().split() for i in range(int(s[0]))] si = [] for i in range(0,int(s),1): si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])])", "import numpy as np def big(a, b): if(a > b): return a return", "return b def small(a, b): if(a > b): return b return a def", "return (a-b+1)**2 return(b-a+1)**2 def sisumi(a,b): if(a>b): return (a-b+1) def abs(a): if(a>0): return a", "a return b def small(a, b): if(a > b): return b return a", "kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag => 0): kaburi_hoten_3 = 0 else:", "a else: return -a s = input() s1 = [input().split() for i in", "bbb(a,b): if(a>b): return (a-b+1)**2 return(b-a+1)**2 def sisumi(a,b): if(a>b): return (a-b+1) def abs(a): if(a>0):", "0 else: kaburi_hoten_3 = print(zentai) print(kaburi_zentai) print(kaburi_hoten_1) print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for i in", "print(zentai) print(kaburi_zentai) print(kaburi_hoten_1) print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for i in range(0,int(s),1): # aaa =", "= input() s1 = [input().split() for i in range(int(s[0]))] si = [] for", "print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for i in range(0,int(s),1): # aaa = 
(int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][2])+1)*(int(s1[i][0])-int(s1[i][2])+1)-((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2*(int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2) # #print((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2)", "sisumi(a,b): if(a>b): return (a-b+1) def abs(a): if(a>0): return a else: return -a s", "((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2 #kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2 =", "abs(a): if(a>0): return a else: return -a s = input() s1 = [input().split()", "for i in range(int(s[0]))] si = [] for i in range(0,int(s),1): si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si)", "= ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2 #kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag", "= ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag => 0):", "kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2 #kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))", "def big(a, b): if(a > b): return a return b def small(a, b):", "0): kaburi_hoten_3 = 0 else: kaburi_hoten_3 = print(zentai) print(kaburi_zentai) print(kaburi_hoten_1) print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007)", "b): if(a > b): return b return a def bbb(a,b): if(a>b): return (a-b+1)**2", "for i in range(0,int(s),1): zentai = bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)", "kaburi_hoten_3 = print(zentai) print(kaburi_zentai) print(kaburi_hoten_1) print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for i in range(0,int(s),1): #", "i in range(0,int(s),1): si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si) for i in range(0,int(s),1): zentai = bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai", "#for i in range(0,int(s),1): # aaa = (int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][2])+1)*(int(s1[i][0])-int(s1[i][2])+1)-((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2*(int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2) # #print((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2) # print(((int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2)) #", "in range(int(s[0]))] si = [] for i in range(0,int(s),1): 
si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si) for i", "return a return b def small(a, b): if(a > b): return b return", "flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag => 0): kaburi_hoten_3 = 0 else: kaburi_hoten_3 = print(zentai)", "np def big(a, b): if(a > b): return a return b def small(a,", "s = input() s1 = [input().split() for i in range(int(s[0]))] si = []", "i in range(int(s[0]))] si = [] for i in range(0,int(s),1): si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si) for", "= (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag => 0): kaburi_hoten_3 = 0 else: kaburi_hoten_3", "#kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag =>", "[] for i in range(0,int(s),1): si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si) for i in range(0,int(s),1): zentai =", "range(int(s[0]))] si = [] for i in range(0,int(s),1): si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si) for i in", "(a-b+1) def abs(a): if(a>0): return a else: return -a s = input() s1", "return a def bbb(a,b): if(a>b): return (a-b+1)**2 return(b-a+1)**2 def sisumi(a,b): if(a>b): return (a-b+1)", "> b): return a return b def small(a, b): if(a > b): return", "i in range(0,int(s),1): # aaa = (int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][2])+1)*(int(s1[i][0])-int(s1[i][2])+1)-((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2*(int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2) # #print((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2) # print(((int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2)) # print(aaa%1000000007)", "return -a s = input() s1 = [input().split() for i in range(int(s[0]))] si", "if(a>b): return (a-b+1) def abs(a): if(a>0): return a else: return -a s =", "kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2 #kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))", "numpy as np def big(a, b): if(a > b): return a return b", "((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag => 0): kaburi_hoten_3", "= si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag => 0): kaburi_hoten_3 = 0 else: kaburi_hoten_3 = print(zentai) print(kaburi_zentai)", "def sisumi(a,b): if(a>b): return (a-b+1) def abs(a): if(a>0): return a else: return -a", "<reponame>i14kawanaka/AtCoder import numpy as np def big(a, b): if(a > b): return a", "if(a > b): return b return a def bbb(a,b): if(a>b): return (a-b+1)**2 return(b-a+1)**2", "= 0 else: kaburi_hoten_3 = print(zentai) print(kaburi_zentai) print(kaburi_hoten_1) print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for i", "if(a > b): return a return b def small(a, b): if(a > b):", "in range(0,int(s),1): zentai = 
bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2", "a def bbb(a,b): if(a>b): return (a-b+1)**2 return(b-a+1)**2 def sisumi(a,b): if(a>b): return (a-b+1) def", "b): if(a > b): return a return b def small(a, b): if(a >", "print(kaburi_hoten_1) print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for i in range(0,int(s),1): # aaa = (int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][2])+1)*(int(s1[i][0])-int(s1[i][2])+1)-((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2*(int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2) #", "print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for i in range(0,int(s),1): # aaa = (int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][2])+1)*(int(s1[i][0])-int(s1[i][2])+1)-((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2*(int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2) # #print((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2) #", "=> 0): kaburi_hoten_3 = 0 else: kaburi_hoten_3 = print(zentai) print(kaburi_zentai) print(kaburi_hoten_1) print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4)", "(big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag => 0): kaburi_hoten_3 = 0", "range(0,int(s),1): zentai = bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2 #kaburi_hoten_2", "range(0,int(s),1): si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si) for i in range(0,int(s),1): zentai = bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2]))", "(a-b+1)**2 return(b-a+1)**2 def sisumi(a,b): if(a>b): return (a-b+1) def abs(a): if(a>0): return a else:", "if(a>0): return a else: return -a s = input() s1 = [input().split() for", "si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si) for i in range(0,int(s),1): zentai = bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1", "in range(0,int(s),1): si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si) for i in range(0,int(s),1): zentai = bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai =", "return a else: return -a s = input() s1 = [input().split() for i", "((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2 #kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag =", "b): return a return b def small(a, b): if(a > b): return b", "s1 = [input().split() for i in range(int(s[0]))] si 
= [] for i in", "big(si[i][1],si[i][2])**2 #kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag", "- (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag => 0): kaburi_hoten_3 =", "= [input().split() for i in range(int(s[0]))] si = [] for i in range(0,int(s),1):", "- big(si[i][1],si[i][2])**2 #kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2])", "b return a def bbb(a,b): if(a>b): return (a-b+1)**2 return(b-a+1)**2 def sisumi(a,b): if(a>b): return", "print(si) for i in range(0,int(s),1): zentai = bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1 =", "return b return a def bbb(a,b): if(a>b): return (a-b+1)**2 return(b-a+1)**2 def sisumi(a,b): if(a>b):", "-a s = input() s1 = [input().split() for i in range(int(s[0]))] si =", "print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for i in range(0,int(s),1): # aaa = (int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][2])+1)*(int(s1[i][0])-int(s1[i][2])+1)-((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2*(int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2) # #print((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2) # print(((int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2))", "i in range(0,int(s),1): zentai = bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) -", "si = [] for i in range(0,int(s),1): si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])]) print(si) for i in range(0,int(s),1):", "(big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2]) if(flag => 0): kaburi_hoten_3 = 0 else: kaburi_hoten_3 =", "b): return b return a def bbb(a,b): if(a>b): return (a-b+1)**2 return(b-a+1)**2 def sisumi(a,b):", "else: return -a s = input() s1 = [input().split() for i in range(int(s[0]))]", "def small(a, b): if(a > b): return b return a def bbb(a,b): if(a>b):", "= ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2 #kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1)) kaburi_hoten_2", "big(a, b): if(a > b): return a return b def small(a, b): if(a", "as np def big(a, b): if(a > b): return a return b def", "return(b-a+1)**2 def sisumi(a,b): if(a>b): return (a-b+1) def abs(a): if(a>0): return a else: return", "> b): return b return a def bbb(a,b): if(a>b): return (a-b+1)**2 return(b-a+1)**2 def", "kaburi_hoten_3 = 0 else: kaburi_hoten_3 = print(zentai) print(kaburi_zentai) 
print(kaburi_hoten_1) print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for", "print(kaburi_zentai) print(kaburi_hoten_1) print(kaburi_hoten_2) print((si[i][0]-big(si[i][1],si[i][2])-1)*4) print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007) #for i in range(0,int(s),1): # aaa = (int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][2])+1)*(int(s1[i][0])-int(s1[i][2])+1)-((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2*(int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2)", "if(flag => 0): kaburi_hoten_3 = 0 else: kaburi_hoten_3 = print(zentai) print(kaburi_zentai) print(kaburi_hoten_1) print(kaburi_hoten_2)", "def abs(a): if(a>0): return a else: return -a s = input() s1 =", "= bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2 #kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)", "zentai = bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2]) kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2])) kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2 #kaburi_hoten_2 =", "b def small(a, b): if(a > b): return b return a def bbb(a,b):" ]
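A quick sanity check of the helpers above (illustrative, not from the source): these asserts follow directly from the definitions and pin down that bbb(a, b) computes (|a - b| + 1)**2.

assert big(3, 5) == 5 and small(3, 5) == 3
assert bbb(5, 3) == (abs(5 - 3) + 1) ** 2 == 9  # bbb(a, b) == (|a - b| + 1)**2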
[ "utf-8 -*- # Generated by Django 1.10.3 on 2016-12-05 14:24 from __future__ import", "__future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('imageledger',", "# Generated by Django 1.10.3 on 2016-12-05 14:24 from __future__ import unicode_literals from", "import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('imageledger', '0010_auto_20161130_1814'),", "Generated by Django 1.10.3 on 2016-12-05 14:24 from __future__ import unicode_literals from django.db", "import migrations class Migration(migrations.Migration): dependencies = [ ('imageledger', '0010_auto_20161130_1814'), ] operations = [", "[ ('imageledger', '0010_auto_20161130_1814'), ] operations = [ migrations.AlterModelOptions( name='favorite', options={'ordering': ['-updated_on']}, ), migrations.AlterModelOptions(", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('imageledger', '0010_auto_20161130_1814'), ] operations =", "on 2016-12-05 14:24 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration):", "class Migration(migrations.Migration): dependencies = [ ('imageledger', '0010_auto_20161130_1814'), ] operations = [ migrations.AlterModelOptions( name='favorite',", "by Django 1.10.3 on 2016-12-05 14:24 from __future__ import unicode_literals from django.db import", "Django 1.10.3 on 2016-12-05 14:24 from __future__ import unicode_literals from django.db import migrations", "14:24 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies =", "-*- coding: utf-8 -*- # Generated by Django 1.10.3 on 2016-12-05 14:24 from", "from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [", "# -*- coding: utf-8 -*- # Generated by Django 1.10.3 on 2016-12-05 14:24", "operations = [ migrations.AlterModelOptions( name='favorite', options={'ordering': ['-updated_on']}, ), migrations.AlterModelOptions( name='list', options={'ordering': ['-updated_on']}, ),", "2016-12-05 14:24 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies", "Migration(migrations.Migration): dependencies = [ ('imageledger', '0010_auto_20161130_1814'), ] operations = [ migrations.AlterModelOptions( name='favorite', options={'ordering':", "'0010_auto_20161130_1814'), ] operations = [ migrations.AlterModelOptions( name='favorite', options={'ordering': ['-updated_on']}, ), migrations.AlterModelOptions( name='list', options={'ordering':", "dependencies = [ ('imageledger', '0010_auto_20161130_1814'), ] operations = [ migrations.AlterModelOptions( name='favorite', options={'ordering': ['-updated_on']},", "] operations = [ migrations.AlterModelOptions( name='favorite', options={'ordering': ['-updated_on']}, ), migrations.AlterModelOptions( name='list', options={'ordering': ['-updated_on']},", "-*- # Generated by Django 1.10.3 on 2016-12-05 14:24 from __future__ import unicode_literals", "migrations class Migration(migrations.Migration): dependencies = [ ('imageledger', '0010_auto_20161130_1814'), ] operations = [ migrations.AlterModelOptions(", "1.10.3 on 2016-12-05 14:24 from __future__ import unicode_literals from django.db import migrations class", "unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ 
('imageledger', '0010_auto_20161130_1814'), ]", "= [ migrations.AlterModelOptions( name='favorite', options={'ordering': ['-updated_on']}, ), migrations.AlterModelOptions( name='list', options={'ordering': ['-updated_on']}, ), ]", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('imageledger', '0010_auto_20161130_1814'), ] operations", "coding: utf-8 -*- # Generated by Django 1.10.3 on 2016-12-05 14:24 from __future__", "= [ ('imageledger', '0010_auto_20161130_1814'), ] operations = [ migrations.AlterModelOptions( name='favorite', options={'ordering': ['-updated_on']}, ),", "('imageledger', '0010_auto_20161130_1814'), ] operations = [ migrations.AlterModelOptions( name='favorite', options={'ordering': ['-updated_on']}, ), migrations.AlterModelOptions( name='list'," ]
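For context, a minimal sketch (an assumption, not part of the migration) of the model Meta state this migration records: both models are ordered newest-first by updated_on. The field type is assumed.

from django.db import models


class Favorite(models.Model):
    updated_on = models.DateTimeField(auto_now=True)  # field name implied by the ordering

    class Meta:
        ordering = ['-updated_on']  # what AlterModelOptions sets for 'favorite' and 'list'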
[ "'rhl': ['return', 'air', 'high', 'limit'], 'retrb': ['return', 'reset', 'band'], 'swovdif': ['economizer', 'switchover', 'differential'],", "\"fan\"], \"sup\": [\"supply\"], \"dis\": [\"discharge\"], \"ex\": [\"exhaust\"], \"ret\": [\"return\"], \"hw\": [\"hot\", \"water\"], \"chw\":", "[\"outside\", \"fan\"], \"rf\": [\"return\", \"fan\"], \"sup\": [\"supply\"], \"dis\": [\"discharge\"], \"ex\": [\"exhaust\"], \"ret\": [\"return\"],", "[\"unoccupied\"], \"volt\": [\"voltage\"], \"ctl\": [\"control\"], \"cfm\": [\"flow\"], \"sa\": [\"supply\", \"air\"], \"ea\": [\"exhaust\", \"air\"],", "'ucnhdf': ['unoccupied', 'heating', 'differential'], 'ucnhsp': ['unoccupied', 'heating', 'setpoint'], 'spdb': ['static', 'pressure'], 'spdw': ['static',", "'proportional', 'band'], 'mall': ['mixed', 'air', 'lowlimit', 'setpoint'], 'ucncdf': ['unoccupied', 'cooling', 'differential'], 'ucncsp': ['unoccupied',", "['discharge', 'cooling', 'deadband'], 'dcit': ['discharge', 'cooling', 'intergration'], 'dcpb': ['discharge', 'cooling', 'proportional', 'band'], 'depb':", "'stbycbia': ['standby', 'cooling', 'bias'], 'unoccbia': ['unoccupied', 'cooling', 'bias'], 'occhbias': ['occupied', 'heating', 'bias'], 'stbyhbia':", "'setpoint'], 'ahusp': ['calculated', 'discharge', 'setpoint'], 'occc': ['occupied', 'command'], 'sdwnc': ['shutdown', 'command'], 'wcc': ['warmup',", "'cfm', 'setpoint'], 'ucmncsp': ['unoccupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'wmupmin': ['warmup', 'cfm', 'setpoint'], 'htgbias':", "'valve'], 'htgocc': ['occ', 'heating', 'setpoint'], 'htgoccminflow': ['occ', 'heating', 'minimum', 'flow'], 'htgunocc': ['night', 'heating',", "\"spt\": [\"setpoint\"], \"stpt\": [\"setpoint\"], \"temp\": [\"temperature\"], \"tmp\": [\"temperature\"], \"t\": [\"temperature\"], \"unocc\": [\"unoccupied\"], \"volt\":", "'heating', 'setpoint'], 'htgvlv': ['heating', 'valve'], 'htgocc': ['occ', 'heating', 'setpoint'], 'htgoccminflow': ['occ', 'heating', 'minimum',", "\"volt\": [\"voltage\"], \"ctl\": [\"control\"], \"cfm\": [\"flow\"], \"sa\": [\"supply\", \"air\"], \"ea\": [\"exhaust\", \"air\"], \"da\":", "['autocalibration', 'required'], 'acact': ['autocalibration', 'in', 'process'], 'effclg': ['effective', 'cooling', 'setpoint'], 'effhtg': ['effective', 'heating',", "['static', 'pressure', 'setpoint'], 'sprampst': ['static', 'pressure', 'output', 'ramp'], 'zpb': ['zone', 'proportional', 'band'], 'zntsp':", "'reset', 'band'], 'swovdif': ['economizer', 'switchover', 'differential'], 'dbswov': ['economizer', 'switchover', 'setpoint'], 'minpossp': ['economizer', 'minimum',", "\"cfm\": [\"flow\"], \"sa\": [\"supply\", \"air\"], \"ea\": [\"exhaust\", \"air\"], \"da\": [\"discharge\", \"air\"], \"oa\": [\"outside\",", "\"coil\", \"unit\"], \"avg\": [\"average\"], \"cmd\": [\"command\"], \"elec\": [\"electrical\"], \"equip\": [\"equipment\"], \"freq\": [\"frequency\"], \"occ\":", "[\"temperature\"], \"unocc\": [\"unoccupied\"], \"volt\": [\"voltage\"], \"ctl\": [\"control\"], \"cfm\": [\"flow\"], \"sa\": [\"supply\", \"air\"], \"ea\":", "'unoccbia': ['unoccupied', 'cooling', 'bias'], 'occhbias': ['occupied', 'heating', 'bias'], 'stbyhbia': ['standby', 'heating', 'bias'], 'boxmode':", "\"hw\": [\"hot\", \"water\"], \"chw\": [\"chilled\", \"water\"], \"z\": [\"zone\"], \"zn\": [\"zone\"], 'mat': ['mixed', 'air',", "['unoccupied', 'heating', 'bias'], 'pkupgain': ['pickup', 'gain'], 'supflow': ['cfm', 'supply', 'flow'], 'supflosp': ['calculated', 'cfm',", "['shutdown', 'status'], 'sfa': ['supply', 'fan', 'alarm'], 
'boxhtgc': ['box', 'heating', 'command'], 'shtgc': ['supplemental', 'heating',", "'output'], 'shoutput': ['supplemental', 'heat', 'output'], 'wtrflush': ['water', 'flush', 'command'], 'boxelec': ['electrical', 'heat', 'protection',", "'command'], 'sdwnc': ['shutdown', 'command'], 'wcc': ['warmup', 'command'], 'econs': ['economizer', 'status'], 'occs': ['occupied', 'status'],", "'bias'], 'stbycbia': ['standby', 'cooling', 'bias'], 'unoccbia': ['unoccupied', 'cooling', 'bias'], 'occhbias': ['occupied', 'heating', 'bias'],", "'cooling', 'bias'], 'occhbias': ['occupied', 'heating', 'bias'], 'stbyhbia': ['standby', 'heating', 'bias'], 'boxmode': ['current', 'box',", "'derivitive', 'weight'], 'spit': ['static', 'pressure', 'integration'], 'sppb': ['static', 'pressure', 'proportional', 'band'], 'spset': ['static',", "[\"equipment\"], \"freq\": [\"frequency\"], \"occ\": [\"occupied\"], \"rtu\": [\"rootftop\", \"unit\"], \"roof\": [\"rooftop\"], \"dmp\": [\"damper\"], \"pos\":", "'heat', 'output'], 'wtrflush': ['water', 'flush', 'command'], 'boxelec': ['electrical', 'heat', 'protection', 'enabled'], 'acreq': ['autocalibration',", "'heating', 'bias'], 'pkupgain': ['pickup', 'gain'], 'supflow': ['cfm', 'supply', 'flow'], 'supflosp': ['calculated', 'cfm', 'setpoint'],", "'heating', 'bias'], 'boxmode': ['current', 'box', 'mode'], 'htgmode': ['current', 'heating', 'mode'], 'onochbia': ['unoccupied', 'heating',", "'supply', 'flow'], 'supflosp': ['calculated', 'cfm', 'setpoint'], 'bhoutput': ['box', 'heat', 'output'], 'shoutput': ['supplemental', 'heat',", "'proportional', 'band'], 'dhdb': ['discharge', 'heating', 'dead', 'band'], 'dll': ['discharge', 'low', 'limit'], 'disrb': ['discharge',", "\"air\", \"pressure\"], \"dap\": [\"discharge\", \"air\", \"pressure\"], \"oap\": [\"outside\", \"air\", \"pressure\"], \"rap\": [\"return\", \"air\",", "'mat': ['mixed', 'air', 'temperature'], 'wcadj': ['warm/cool', 'adjust'], 'sap': ['static', 'pressure'], 'znt': ['zone', 'space',", "'sppb': ['static', 'pressure', 'proportional', 'band'], 'spset': ['static', 'pressure', 'setpoint'], 'sprampst': ['static', 'pressure', 'output',", "'pressure'], 'znt': ['zone', 'space', 'temperature'], 'htgo': ['heating', 'valve', 'analog', 'signal'], 'clgo': ['cooling', 'valve',", "\"air\", \"pressure\"], \"sf\": [\"supply\", \"fan\"], \"ef\": [\"exhaust\", \"fan\"], \"df\": [\"discharge\", \"fan\"], \"of\": [\"outside\",", "'setpoint'], 'wmupmin': ['warmup', 'cfm', 'setpoint'], 'htgbias': ['active', 'heating', 'bias'], 'occhtgfl': ['occupied', 'heating', 'cfm',", "'low', 'limit', 'proportional', 'band'], 'mall': ['mixed', 'air', 'lowlimit', 'setpoint'], 'ucncdf': ['unoccupied', 'cooling', 'differential'],", "[\"discharge\"], \"ex\": [\"exhaust\"], \"ret\": [\"return\"], \"hw\": [\"hot\", \"water\"], \"chw\": [\"chilled\", \"water\"], \"z\": [\"zone\"],", "[\"average\"], \"cmd\": [\"command\"], \"elec\": [\"electrical\"], \"equip\": [\"equipment\"], \"freq\": [\"frequency\"], \"occ\": [\"occupied\"], \"rtu\": [\"rootftop\",", "'setpoint'], 'htgbias': ['active', 'heating', 'bias'], 'occhtgfl': ['occupied', 'heating', 'cfm', 'setpoint'], 'unchmax': ['unoccupied', 'cooling',", "= { \"ahu\": [\"air\", \"handler\", \"unit\"], \"vav\": [\"variable\", \"volume\", \"box\"], \"fcu\": [\"fan\", \"coil\",", "\"ea\": [\"exhaust\", \"air\"], \"da\": [\"discharge\", \"air\"], \"oa\": [\"outside\", \"air\"], \"ra\": [\"return\", \"air\"], \"sat\":", "\"ex\": [\"exhaust\"], \"ret\": [\"return\"], \"hw\": [\"hot\", \"water\"], \"chw\": [\"chilled\", 
\"water\"], \"z\": [\"zone\"], \"zn\":", "'ucncdf': ['unoccupied', 'cooling', 'differential'], 'ucncsp': ['unoccupied', 'cooling', 'setpoint'], 'ucnhdf': ['unoccupied', 'heating', 'differential'], 'ucnhsp':", "['unoccupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'wmupmin': ['warmup', 'cfm', 'setpoint'], 'htgbias': ['active', 'heating', 'bias'],", "['effective', 'cooling', 'setpoint'], 'effhtg': ['effective', 'heating', 'setpoint'], 'htgvlv': ['heating', 'valve'], 'htgocc': ['occ', 'heating',", "'dbswov': ['economizer', 'switchover', 'setpoint'], 'minpossp': ['economizer', 'minimum', 'position'], 'malldb': ['mixed', 'air', 'low', 'limit',", "'disrb': ['discharge', 'reset', 'band'], 'rhl': ['return', 'air', 'high', 'limit'], 'retrb': ['return', 'reset', 'band'],", "'zpb': ['zone', 'proportional', 'band'], 'zntsp': ['zone', 'temperature', 'setpoint'], 'ahusp': ['calculated', 'discharge', 'setpoint'], 'occc':", "'air', 'low', 'limit', 'proportional', 'band'], 'mall': ['mixed', 'air', 'lowlimit', 'setpoint'], 'ucncdf': ['unoccupied', 'cooling',", "[\"fan\", \"coil\", \"unit\"], \"avg\": [\"average\"], \"cmd\": [\"command\"], \"elec\": [\"electrical\"], \"equip\": [\"equipment\"], \"freq\": [\"frequency\"],", "'wmupmin': ['warmup', 'cfm', 'setpoint'], 'htgbias': ['active', 'heating', 'bias'], 'occhtgfl': ['occupied', 'heating', 'cfm', 'setpoint'],", "<reponame>BrickSchema/reconciliation-api<gh_stars>1-10 abbrmap = { \"ahu\": [\"air\", \"handler\", \"unit\"], \"vav\": [\"variable\", \"volume\", \"box\"], \"fcu\":", "'supflosp': ['calculated', 'cfm', 'setpoint'], 'bhoutput': ['box', 'heat', 'output'], 'shoutput': ['supplemental', 'heat', 'output'], 'wtrflush':", "'dcit': ['discharge', 'cooling', 'intergration'], 'dcpb': ['discharge', 'cooling', 'proportional', 'band'], 'depb': ['discharge', 'economizer', 'proportional',", "\"da\": [\"discharge\", \"air\"], \"oa\": [\"outside\", \"air\"], \"ra\": [\"return\", \"air\"], \"sat\": [\"supply\", \"air\", \"temperature\"],", "['supplemental', 'heat', 'output'], 'wtrflush': ['water', 'flush', 'command'], 'boxelec': ['electrical', 'heat', 'protection', 'enabled'], 'acreq':", "'fan', 'inlet', 'vane', 'vfd', 'signal'], 'sfs': ['supply', 'fan', 'status'], 'rfs': ['return', 'fan', 'status'],", "'switchover', 'differential'], 'dbswov': ['economizer', 'switchover', 'setpoint'], 'minpossp': ['economizer', 'minimum', 'position'], 'malldb': ['mixed', 'air',", "'minimum', 'cfm', 'setpoint'], 'ucmncsp': ['unoccupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'wmupmin': ['warmup', 'cfm', 'setpoint'],", "\"ret\": [\"return\"], \"hw\": [\"hot\", \"water\"], \"chw\": [\"chilled\", \"water\"], \"z\": [\"zone\"], \"zn\": [\"zone\"], 'mat':", "'mall': ['mixed', 'air', 'lowlimit', 'setpoint'], 'ucncdf': ['unoccupied', 'cooling', 'differential'], 'ucncsp': ['unoccupied', 'cooling', 'setpoint'],", "['unoccupied', 'heating', 'differential'], 'ucnhsp': ['unoccupied', 'heating', 'setpoint'], 'spdb': ['static', 'pressure'], 'spdw': ['static', 'pressure',", "'htgoccminflow': ['occ', 'heating', 'minimum', 'flow'], 'htgunocc': ['night', 'heating', 'setpoint'], 'network': ['network', 'setpoint'], 'occsched':", "'saf': ['supply', 'airflow', 'rate'], 'vp': ['velocity', 'pressure'], 'clgocc': ['occ', 'cooling'], 'autocal': ['autocalibration'], }", "\"ra\": [\"return\", \"air\"], \"sat\": [\"supply\", \"air\", \"temperature\"], \"eat\": [\"exhaust\", \"air\", \"temperature\"], \"dat\": [\"discharge\",", "'temperature'], 'wcadj': ['warm/cool', 'adjust'], 'sap': ['static', 'pressure'], 
'znt': ['zone', 'space', 'temperature'], 'htgo': ['heating',", "'air', 'low', 'limit', 'offset'], 'mallpb': ['mixed', 'air', 'low', 'limit', 'proportional', 'band'], 'mall': ['mixed',", "'econs': ['economizer', 'status'], 'occs': ['occupied', 'status'], 'restasts': ['status', 'of', 'restart', 'delay'], 'sdwns': ['shutdown',", "'minimum', 'cfm', 'setpoint'], 'wmupmin': ['warmup', 'cfm', 'setpoint'], 'htgbias': ['active', 'heating', 'bias'], 'occhtgfl': ['occupied',", "'heat', 'protection', 'enabled'], 'acreq': ['autocalibration', 'required'], 'acact': ['autocalibration', 'in', 'process'], 'effclg': ['effective', 'cooling',", "'output', 'ramp'], 'zpb': ['zone', 'proportional', 'band'], 'zntsp': ['zone', 'temperature', 'setpoint'], 'ahusp': ['calculated', 'discharge',", "'restart', 'delay'], 'sdwns': ['shutdown', 'status'], 'sfa': ['supply', 'fan', 'alarm'], 'boxhtgc': ['box', 'heating', 'command'],", "\"ahu\": [\"air\", \"handler\", \"unit\"], \"vav\": [\"variable\", \"volume\", \"box\"], \"fcu\": [\"fan\", \"coil\", \"unit\"], \"avg\":", "\"pressure\"], \"sf\": [\"supply\", \"fan\"], \"ef\": [\"exhaust\", \"fan\"], \"df\": [\"discharge\", \"fan\"], \"of\": [\"outside\", \"fan\"],", "'air', 'high', 'limit'], 'retrb': ['return', 'reset', 'band'], 'swovdif': ['economizer', 'switchover', 'differential'], 'dbswov': ['economizer',", "\"fcu\": [\"fan\", \"coil\", \"unit\"], \"avg\": [\"average\"], \"cmd\": [\"command\"], \"elec\": [\"electrical\"], \"equip\": [\"equipment\"], \"freq\":", "'heating', 'differential'], 'ucnhsp': ['unoccupied', 'heating', 'setpoint'], 'spdb': ['static', 'pressure'], 'spdw': ['static', 'pressure', 'derivitive',", "\"freq\": [\"frequency\"], \"occ\": [\"occupied\"], \"rtu\": [\"rootftop\", \"unit\"], \"roof\": [\"rooftop\"], \"dmp\": [\"damper\"], \"pos\": [\"position\"],", "\"sa\": [\"supply\", \"air\"], \"ea\": [\"exhaust\", \"air\"], \"da\": [\"discharge\", \"air\"], \"oa\": [\"outside\", \"air\"], \"ra\":", "['unoccupied', 'cooling', 'bias'], 'occhbias': ['occupied', 'heating', 'bias'], 'stbyhbia': ['standby', 'heating', 'bias'], 'boxmode': ['current',", "[\"command\"], \"elec\": [\"electrical\"], \"equip\": [\"equipment\"], \"freq\": [\"frequency\"], \"occ\": [\"occupied\"], \"rtu\": [\"rootftop\", \"unit\"], \"roof\":", "\"pressure\"], \"rap\": [\"return\", \"air\", \"pressure\"], \"sf\": [\"supply\", \"fan\"], \"ef\": [\"exhaust\", \"fan\"], \"df\": [\"discharge\",", "[\"zone\"], \"zn\": [\"zone\"], 'mat': ['mixed', 'air', 'temperature'], 'wcadj': ['warm/cool', 'adjust'], 'sap': ['static', 'pressure'],", "'setpoint'], 'sprampst': ['static', 'pressure', 'output', 'ramp'], 'zpb': ['zone', 'proportional', 'band'], 'zntsp': ['zone', 'temperature',", "'gain'], 'supflow': ['cfm', 'supply', 'flow'], 'supflosp': ['calculated', 'cfm', 'setpoint'], 'bhoutput': ['box', 'heat', 'output'],", "['active', 'cooling', 'bias'], 'cmaxflo': ['maximum', 'cooling', 'cfm', 'setpoint'], 'ocmncsp': ['occupied', 'cooling', 'minimum', 'cfm',", "[\"setpoint\"], \"spt\": [\"setpoint\"], \"stpt\": [\"setpoint\"], \"temp\": [\"temperature\"], \"tmp\": [\"temperature\"], \"t\": [\"temperature\"], \"unocc\": [\"unoccupied\"],", "'boxmode': ['current', 'box', 'mode'], 'htgmode': ['current', 'heating', 'mode'], 'onochbia': ['unoccupied', 'heating', 'bias'], 'pkupgain':", "\"elec\": [\"electrical\"], \"equip\": [\"equipment\"], \"freq\": [\"frequency\"], \"occ\": [\"occupied\"], \"rtu\": [\"rootftop\", \"unit\"], \"roof\": [\"rooftop\"],", "'lowlimit', 'setpoint'], 'ucncdf': ['unoccupied', 
'cooling', 'differential'], 'ucncsp': ['unoccupied', 'cooling', 'setpoint'], 'ucnhdf': ['unoccupied', 'heating',", "'cooling', 'minimum', 'cfm', 'setpoint'], 'wmupmin': ['warmup', 'cfm', 'setpoint'], 'htgbias': ['active', 'heating', 'bias'], 'occhtgfl':", "'spit': ['static', 'pressure', 'integration'], 'sppb': ['static', 'pressure', 'proportional', 'band'], 'spset': ['static', 'pressure', 'setpoint'],", "\"oat\": [\"outside\", \"air\", \"temperature\"], \"rat\": [\"return\", \"air\", \"temperature\"], \"sap\": [\"supply\", \"air\", \"pressure\"], \"eap\":", "['unoccupied', 'cooling', 'maximum', 'cfm', 'setpoint'], 'occcbias': ['occupied', 'cooling', 'bias'], 'stbycbia': ['standby', 'cooling', 'bias'],", "'limit'], 'disrb': ['discharge', 'reset', 'band'], 'rhl': ['return', 'air', 'high', 'limit'], 'retrb': ['return', 'reset',", "['warmup', 'command'], 'econs': ['economizer', 'status'], 'occs': ['occupied', 'status'], 'restasts': ['status', 'of', 'restart', 'delay'],", "'pressure', 'setpoint'], 'sprampst': ['static', 'pressure', 'output', 'ramp'], 'zpb': ['zone', 'proportional', 'band'], 'zntsp': ['zone',", "['standby', 'heating', 'bias'], 'boxmode': ['current', 'box', 'mode'], 'htgmode': ['current', 'heating', 'mode'], 'onochbia': ['unoccupied',", "'minimum', 'cfm', 'setpoint'], 'commonsp': ['common', 'setpoint'], 'clgbias': ['active', 'cooling', 'bias'], 'cmaxflo': ['maximum', 'cooling',", "'offset'], 'mallpb': ['mixed', 'air', 'low', 'limit', 'proportional', 'band'], 'mall': ['mixed', 'air', 'lowlimit', 'setpoint'],", "'proportional', 'band'], 'zntsp': ['zone', 'temperature', 'setpoint'], 'ahusp': ['calculated', 'discharge', 'setpoint'], 'occc': ['occupied', 'command'],", "\"air\", \"temperature\"], \"rat\": [\"return\", \"air\", \"temperature\"], \"sap\": [\"supply\", \"air\", \"pressure\"], \"eap\": [\"exhaust\", \"air\",", "\"dap\": [\"discharge\", \"air\", \"pressure\"], \"oap\": [\"outside\", \"air\", \"pressure\"], \"rap\": [\"return\", \"air\", \"pressure\"], \"sf\":", "\"pressure\"], \"dap\": [\"discharge\", \"air\", \"pressure\"], \"oap\": [\"outside\", \"air\", \"pressure\"], \"rap\": [\"return\", \"air\", \"pressure\"],", "'setpoint'], 'htgvlv': ['heating', 'valve'], 'htgocc': ['occ', 'heating', 'setpoint'], 'htgoccminflow': ['occ', 'heating', 'minimum', 'flow'],", "'delay'], 'sdwns': ['shutdown', 'status'], 'sfa': ['supply', 'fan', 'alarm'], 'boxhtgc': ['box', 'heating', 'command'], 'shtgc':", "'sdwnc': ['shutdown', 'command'], 'wcc': ['warmup', 'command'], 'econs': ['economizer', 'status'], 'occs': ['occupied', 'status'], 'restasts':", "'process'], 'effclg': ['effective', 'cooling', 'setpoint'], 'effhtg': ['effective', 'heating', 'setpoint'], 'htgvlv': ['heating', 'valve'], 'htgocc':", "'valve', 'analog', 'signal'], 'sfo': ['supply', 'fan', 'inlet', 'vane', 'vfd', 'signal'], 'sfs': ['supply', 'fan',", "'depb': ['discharge', 'economizer', 'proportional', 'band'], 'dhdb': ['discharge', 'heating', 'dead', 'band'], 'dll': ['discharge', 'low',", "'setpoint'], 'occsched': ['occupancy', 'schedule'], 'saf': ['supply', 'airflow', 'rate'], 'vp': ['velocity', 'pressure'], 'clgocc': ['occ',", "['shutdown', 'command'], 'wcc': ['warmup', 'command'], 'econs': ['economizer', 'status'], 'occs': ['occupied', 'status'], 'restasts': ['status',", "'setpoint'], 'ucncdf': ['unoccupied', 'cooling', 'differential'], 'ucncsp': ['unoccupied', 'cooling', 'setpoint'], 'ucnhdf': ['unoccupied', 'heating', 'differential'],", "['static', 'pressure', 'derivitive', 'weight'], 'spit': ['static', 'pressure', 
'integration'], 'sppb': ['static', 'pressure', 'proportional', 'band'],", "'boxhtgc': ['box', 'heating', 'command'], 'shtgc': ['supplemental', 'heating', 'command'], 'htgminfl': ['heating', 'minimum', 'cfm', 'setpoint'],", "'cooling', 'intergration'], 'dcpb': ['discharge', 'cooling', 'proportional', 'band'], 'depb': ['discharge', 'economizer', 'proportional', 'band'], 'dhdb':", "['unoccupied', 'heating', 'setpoint'], 'spdb': ['static', 'pressure'], 'spdw': ['static', 'pressure', 'derivitive', 'weight'], 'spit': ['static',", "\"rtu\": [\"rootftop\", \"unit\"], \"roof\": [\"rooftop\"], \"dmp\": [\"damper\"], \"pos\": [\"position\"], \"sp\": [\"setpoint\"], \"spt\": [\"setpoint\"],", "['heating', 'minimum', 'cfm', 'setpoint'], 'commonsp': ['common', 'setpoint'], 'clgbias': ['active', 'cooling', 'bias'], 'cmaxflo': ['maximum',", "'limit'], 'retrb': ['return', 'reset', 'band'], 'swovdif': ['economizer', 'switchover', 'differential'], 'dbswov': ['economizer', 'switchover', 'setpoint'],", "['common', 'setpoint'], 'clgbias': ['active', 'cooling', 'bias'], 'cmaxflo': ['maximum', 'cooling', 'cfm', 'setpoint'], 'ocmncsp': ['occupied',", "['occupancy', 'schedule'], 'saf': ['supply', 'airflow', 'rate'], 'vp': ['velocity', 'pressure'], 'clgocc': ['occ', 'cooling'], 'autocal':", "\"air\", \"pressure\"], \"eap\": [\"exhaust\", \"air\", \"pressure\"], \"dap\": [\"discharge\", \"air\", \"pressure\"], \"oap\": [\"outside\", \"air\",", "'minimum', 'position'], 'malldb': ['mixed', 'air', 'low', 'limit', 'dead', 'band'], 'mallit': ['mixed', 'air', 'low',", "'onochbia': ['unoccupied', 'heating', 'bias'], 'pkupgain': ['pickup', 'gain'], 'supflow': ['cfm', 'supply', 'flow'], 'supflosp': ['calculated',", "'temperature'], 'htgo': ['heating', 'valve', 'analog', 'signal'], 'clgo': ['cooling', 'valve', 'analog', 'signal'], 'sfo': ['supply',", "'cfm', 'setpoint'], 'htgbias': ['active', 'heating', 'bias'], 'occhtgfl': ['occupied', 'heating', 'cfm', 'setpoint'], 'unchmax': ['unoccupied',", "'ocmncsp': ['occupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'ucmncsp': ['unoccupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'wmupmin':", "\"eat\": [\"exhaust\", \"air\", \"temperature\"], \"dat\": [\"discharge\", \"air\", \"temperature\"], \"oat\": [\"outside\", \"air\", \"temperature\"], \"rat\":", "'boxelec': ['electrical', 'heat', 'protection', 'enabled'], 'acreq': ['autocalibration', 'required'], 'acact': ['autocalibration', 'in', 'process'], 'effclg':", "'temperature', 'setpoint'], 'ahusp': ['calculated', 'discharge', 'setpoint'], 'occc': ['occupied', 'command'], 'sdwnc': ['shutdown', 'command'], 'wcc':", "['cfm', 'supply', 'flow'], 'supflosp': ['calculated', 'cfm', 'setpoint'], 'bhoutput': ['box', 'heat', 'output'], 'shoutput': ['supplemental',", "['economizer', 'status'], 'occs': ['occupied', 'status'], 'restasts': ['status', 'of', 'restart', 'delay'], 'sdwns': ['shutdown', 'status'],", "'signal'], 'sfs': ['supply', 'fan', 'status'], 'rfs': ['return', 'fan', 'status'], 'smk': ['smoke', 'detector'], 'sds':", "['calculated', 'cfm', 'setpoint'], 'bhoutput': ['box', 'heat', 'output'], 'shoutput': ['supplemental', 'heat', 'output'], 'wtrflush': ['water',", "'valve', 'analog', 'signal'], 'clgo': ['cooling', 'valve', 'analog', 'signal'], 'sfo': ['supply', 'fan', 'inlet', 'vane',", "\"ctl\": [\"control\"], \"cfm\": [\"flow\"], \"sa\": [\"supply\", \"air\"], \"ea\": [\"exhaust\", \"air\"], \"da\": [\"discharge\", \"air\"],", "'ramp'], 'zpb': ['zone', 'proportional', 'band'], 'zntsp': ['zone', 'temperature', 'setpoint'], 
'ahusp': ['calculated', 'discharge', 'setpoint'],", "'sfa': ['supply', 'fan', 'alarm'], 'boxhtgc': ['box', 'heating', 'command'], 'shtgc': ['supplemental', 'heating', 'command'], 'htgminfl':", "'dcdb': ['discharge', 'cooling', 'deadband'], 'dcit': ['discharge', 'cooling', 'intergration'], 'dcpb': ['discharge', 'cooling', 'proportional', 'band'],", "'output'], 'wtrflush': ['water', 'flush', 'command'], 'boxelec': ['electrical', 'heat', 'protection', 'enabled'], 'acreq': ['autocalibration', 'required'],", "abbrmap = { \"ahu\": [\"air\", \"handler\", \"unit\"], \"vav\": [\"variable\", \"volume\", \"box\"], \"fcu\": [\"fan\",", "\"df\": [\"discharge\", \"fan\"], \"of\": [\"outside\", \"fan\"], \"rf\": [\"return\", \"fan\"], \"sup\": [\"supply\"], \"dis\": [\"discharge\"],", "['night', 'heating', 'setpoint'], 'network': ['network', 'setpoint'], 'occsched': ['occupancy', 'schedule'], 'saf': ['supply', 'airflow', 'rate'],", "'acact': ['autocalibration', 'in', 'process'], 'effclg': ['effective', 'cooling', 'setpoint'], 'effhtg': ['effective', 'heating', 'setpoint'], 'htgvlv':", "\"dis\": [\"discharge\"], \"ex\": [\"exhaust\"], \"ret\": [\"return\"], \"hw\": [\"hot\", \"water\"], \"chw\": [\"chilled\", \"water\"], \"z\":", "'sfo': ['supply', 'fan', 'inlet', 'vane', 'vfd', 'signal'], 'sfs': ['supply', 'fan', 'status'], 'rfs': ['return',", "['discharge', 'heating', 'dead', 'band'], 'dll': ['discharge', 'low', 'limit'], 'disrb': ['discharge', 'reset', 'band'], 'rhl':", "'htgo': ['heating', 'valve', 'analog', 'signal'], 'clgo': ['cooling', 'valve', 'analog', 'signal'], 'sfo': ['supply', 'fan',", "'air', 'low', 'limit', 'dead', 'band'], 'mallit': ['mixed', 'air', 'low', 'limit', 'integration'], 'mallo': ['mixed',", "'heating', 'command'], 'htgminfl': ['heating', 'minimum', 'cfm', 'setpoint'], 'commonsp': ['common', 'setpoint'], 'clgbias': ['active', 'cooling',", "['network', 'setpoint'], 'occsched': ['occupancy', 'schedule'], 'saf': ['supply', 'airflow', 'rate'], 'vp': ['velocity', 'pressure'], 'clgocc':", "\"stpt\": [\"setpoint\"], \"temp\": [\"temperature\"], \"tmp\": [\"temperature\"], \"t\": [\"temperature\"], \"unocc\": [\"unoccupied\"], \"volt\": [\"voltage\"], \"ctl\":", "['current', 'heating', 'mode'], 'onochbia': ['unoccupied', 'heating', 'bias'], 'pkupgain': ['pickup', 'gain'], 'supflow': ['cfm', 'supply',", "['mixed', 'air', 'lowlimit', 'setpoint'], 'ucncdf': ['unoccupied', 'cooling', 'differential'], 'ucncsp': ['unoccupied', 'cooling', 'setpoint'], 'ucnhdf':", "'signal'], 'clgo': ['cooling', 'valve', 'analog', 'signal'], 'sfo': ['supply', 'fan', 'inlet', 'vane', 'vfd', 'signal'],", "'space', 'temperature'], 'htgo': ['heating', 'valve', 'analog', 'signal'], 'clgo': ['cooling', 'valve', 'analog', 'signal'], 'sfo':", "'box', 'mode'], 'htgmode': ['current', 'heating', 'mode'], 'onochbia': ['unoccupied', 'heating', 'bias'], 'pkupgain': ['pickup', 'gain'],", "'heating', 'cfm', 'setpoint'], 'unchmax': ['unoccupied', 'cooling', 'maximum', 'cfm', 'setpoint'], 'occcbias': ['occupied', 'cooling', 'bias'],", "\"air\", \"pressure\"], \"oap\": [\"outside\", \"air\", \"pressure\"], \"rap\": [\"return\", \"air\", \"pressure\"], \"sf\": [\"supply\", \"fan\"],", "'flush', 'command'], 'boxelec': ['electrical', 'heat', 'protection', 'enabled'], 'acreq': ['autocalibration', 'required'], 'acact': ['autocalibration', 'in',", "['electrical', 'heat', 'protection', 'enabled'], 'acreq': ['autocalibration', 'required'], 'acact': ['autocalibration', 'in', 'process'], 'effclg': ['effective',", "'malldb': ['mixed', 'air', 
'low', 'limit', 'dead', 'band'], 'mallit': ['mixed', 'air', 'low', 'limit', 'integration'],", "'required'], 'acact': ['autocalibration', 'in', 'process'], 'effclg': ['effective', 'cooling', 'setpoint'], 'effhtg': ['effective', 'heating', 'setpoint'],", "'band'], 'depb': ['discharge', 'economizer', 'proportional', 'band'], 'dhdb': ['discharge', 'heating', 'dead', 'band'], 'dll': ['discharge',", "'ucmncsp': ['unoccupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'wmupmin': ['warmup', 'cfm', 'setpoint'], 'htgbias': ['active', 'heating',", "[\"temperature\"], \"tmp\": [\"temperature\"], \"t\": [\"temperature\"], \"unocc\": [\"unoccupied\"], \"volt\": [\"voltage\"], \"ctl\": [\"control\"], \"cfm\": [\"flow\"],", "\"sp\": [\"setpoint\"], \"spt\": [\"setpoint\"], \"stpt\": [\"setpoint\"], \"temp\": [\"temperature\"], \"tmp\": [\"temperature\"], \"t\": [\"temperature\"], \"unocc\":", "'dcpb': ['discharge', 'cooling', 'proportional', 'band'], 'depb': ['discharge', 'economizer', 'proportional', 'band'], 'dhdb': ['discharge', 'heating',", "'heating', 'setpoint'], 'spdb': ['static', 'pressure'], 'spdw': ['static', 'pressure', 'derivitive', 'weight'], 'spit': ['static', 'pressure',", "['static', 'pressure', 'proportional', 'band'], 'spset': ['static', 'pressure', 'setpoint'], 'sprampst': ['static', 'pressure', 'output', 'ramp'],", "'effclg': ['effective', 'cooling', 'setpoint'], 'effhtg': ['effective', 'heating', 'setpoint'], 'htgvlv': ['heating', 'valve'], 'htgocc': ['occ',", "'occhtgfl': ['occupied', 'heating', 'cfm', 'setpoint'], 'unchmax': ['unoccupied', 'cooling', 'maximum', 'cfm', 'setpoint'], 'occcbias': ['occupied',", "'in', 'process'], 'effclg': ['effective', 'cooling', 'setpoint'], 'effhtg': ['effective', 'heating', 'setpoint'], 'htgvlv': ['heating', 'valve'],", "['warmup', 'cfm', 'setpoint'], 'htgbias': ['active', 'heating', 'bias'], 'occhtgfl': ['occupied', 'heating', 'cfm', 'setpoint'], 'unchmax':", "'dll': ['discharge', 'low', 'limit'], 'disrb': ['discharge', 'reset', 'band'], 'rhl': ['return', 'air', 'high', 'limit'],", "[\"damper\"], \"pos\": [\"position\"], \"sp\": [\"setpoint\"], \"spt\": [\"setpoint\"], \"stpt\": [\"setpoint\"], \"temp\": [\"temperature\"], \"tmp\": [\"temperature\"],", "'shtgc': ['supplemental', 'heating', 'command'], 'htgminfl': ['heating', 'minimum', 'cfm', 'setpoint'], 'commonsp': ['common', 'setpoint'], 'clgbias':", "[\"flow\"], \"sa\": [\"supply\", \"air\"], \"ea\": [\"exhaust\", \"air\"], \"da\": [\"discharge\", \"air\"], \"oa\": [\"outside\", \"air\"],", "'wcadj': ['warm/cool', 'adjust'], 'sap': ['static', 'pressure'], 'znt': ['zone', 'space', 'temperature'], 'htgo': ['heating', 'valve',", "[\"temperature\"], \"t\": [\"temperature\"], \"unocc\": [\"unoccupied\"], \"volt\": [\"voltage\"], \"ctl\": [\"control\"], \"cfm\": [\"flow\"], \"sa\": [\"supply\",", "[\"return\", \"air\", \"temperature\"], \"sap\": [\"supply\", \"air\", \"pressure\"], \"eap\": [\"exhaust\", \"air\", \"pressure\"], \"dap\": [\"discharge\",", "'command'], 'shtgc': ['supplemental', 'heating', 'command'], 'htgminfl': ['heating', 'minimum', 'cfm', 'setpoint'], 'commonsp': ['common', 'setpoint'],", "['mixed', 'air', 'low', 'limit', 'dead', 'band'], 'mallit': ['mixed', 'air', 'low', 'limit', 'integration'], 'mallo':", "\"tmp\": [\"temperature\"], \"t\": [\"temperature\"], \"unocc\": [\"unoccupied\"], \"volt\": [\"voltage\"], \"ctl\": [\"control\"], \"cfm\": [\"flow\"], \"sa\":", "['supply', 'fan', 'status'], 'rfs': ['return', 'fan', 'status'], 'smk': ['smoke', 'detector'], 'sds': ['smoke', 
'detector'],", "[\"discharge\", \"air\", \"temperature\"], \"oat\": [\"outside\", \"air\", \"temperature\"], \"rat\": [\"return\", \"air\", \"temperature\"], \"sap\": [\"supply\",", "'reset', 'band'], 'rhl': ['return', 'air', 'high', 'limit'], 'retrb': ['return', 'reset', 'band'], 'swovdif': ['economizer',", "\"sat\": [\"supply\", \"air\", \"temperature\"], \"eat\": [\"exhaust\", \"air\", \"temperature\"], \"dat\": [\"discharge\", \"air\", \"temperature\"], \"oat\":", "'setpoint'], 'clgbias': ['active', 'cooling', 'bias'], 'cmaxflo': ['maximum', 'cooling', 'cfm', 'setpoint'], 'ocmncsp': ['occupied', 'cooling',", "['supply', 'fan', 'inlet', 'vane', 'vfd', 'signal'], 'sfs': ['supply', 'fan', 'status'], 'rfs': ['return', 'fan',", "[\"rootftop\", \"unit\"], \"roof\": [\"rooftop\"], \"dmp\": [\"damper\"], \"pos\": [\"position\"], \"sp\": [\"setpoint\"], \"spt\": [\"setpoint\"], \"stpt\":", "'air', 'low', 'limit', 'integration'], 'mallo': ['mixed', 'air', 'low', 'limit', 'offset'], 'mallpb': ['mixed', 'air',", "'band'], 'rhl': ['return', 'air', 'high', 'limit'], 'retrb': ['return', 'reset', 'band'], 'swovdif': ['economizer', 'switchover',", "['occupied', 'heating', 'cfm', 'setpoint'], 'unchmax': ['unoccupied', 'cooling', 'maximum', 'cfm', 'setpoint'], 'occcbias': ['occupied', 'cooling',", "'analog', 'signal'], 'sfo': ['supply', 'fan', 'inlet', 'vane', 'vfd', 'signal'], 'sfs': ['supply', 'fan', 'status'],", "'schedule'], 'saf': ['supply', 'airflow', 'rate'], 'vp': ['velocity', 'pressure'], 'clgocc': ['occ', 'cooling'], 'autocal': ['autocalibration'],", "'network': ['network', 'setpoint'], 'occsched': ['occupancy', 'schedule'], 'saf': ['supply', 'airflow', 'rate'], 'vp': ['velocity', 'pressure'],", "['pickup', 'gain'], 'supflow': ['cfm', 'supply', 'flow'], 'supflosp': ['calculated', 'cfm', 'setpoint'], 'bhoutput': ['box', 'heat',", "\"air\", \"temperature\"], \"oat\": [\"outside\", \"air\", \"temperature\"], \"rat\": [\"return\", \"air\", \"temperature\"], \"sap\": [\"supply\", \"air\",", "['warm/cool', 'adjust'], 'sap': ['static', 'pressure'], 'znt': ['zone', 'space', 'temperature'], 'htgo': ['heating', 'valve', 'analog',", "'cfm', 'setpoint'], 'occcbias': ['occupied', 'cooling', 'bias'], 'stbycbia': ['standby', 'cooling', 'bias'], 'unoccbia': ['unoccupied', 'cooling',", "'vane', 'vfd', 'signal'], 'sfs': ['supply', 'fan', 'status'], 'rfs': ['return', 'fan', 'status'], 'smk': ['smoke',", "'cmaxflo': ['maximum', 'cooling', 'cfm', 'setpoint'], 'ocmncsp': ['occupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'ucmncsp': ['unoccupied',", "\"t\": [\"temperature\"], \"unocc\": [\"unoccupied\"], \"volt\": [\"voltage\"], \"ctl\": [\"control\"], \"cfm\": [\"flow\"], \"sa\": [\"supply\", \"air\"],", "['economizer', 'switchover', 'differential'], 'dbswov': ['economizer', 'switchover', 'setpoint'], 'minpossp': ['economizer', 'minimum', 'position'], 'malldb': ['mixed',", "['economizer', 'switchover', 'setpoint'], 'minpossp': ['economizer', 'minimum', 'position'], 'malldb': ['mixed', 'air', 'low', 'limit', 'dead',", "['static', 'pressure'], 'znt': ['zone', 'space', 'temperature'], 'htgo': ['heating', 'valve', 'analog', 'signal'], 'clgo': ['cooling',", "['box', 'heating', 'command'], 'shtgc': ['supplemental', 'heating', 'command'], 'htgminfl': ['heating', 'minimum', 'cfm', 'setpoint'], 'commonsp':", "'cfm', 'setpoint'], 'unchmax': ['unoccupied', 'cooling', 'maximum', 'cfm', 'setpoint'], 'occcbias': ['occupied', 'cooling', 'bias'], 'stbycbia':", "[\"supply\", \"air\", \"pressure\"], \"eap\": [\"exhaust\", 
\"air\", \"pressure\"], \"dap\": [\"discharge\", \"air\", \"pressure\"], \"oap\": [\"outside\",", "'cfm', 'setpoint'], 'bhoutput': ['box', 'heat', 'output'], 'shoutput': ['supplemental', 'heat', 'output'], 'wtrflush': ['water', 'flush',", "\"roof\": [\"rooftop\"], \"dmp\": [\"damper\"], \"pos\": [\"position\"], \"sp\": [\"setpoint\"], \"spt\": [\"setpoint\"], \"stpt\": [\"setpoint\"], \"temp\":", "[\"occupied\"], \"rtu\": [\"rootftop\", \"unit\"], \"roof\": [\"rooftop\"], \"dmp\": [\"damper\"], \"pos\": [\"position\"], \"sp\": [\"setpoint\"], \"spt\":", "'bias'], 'boxmode': ['current', 'box', 'mode'], 'htgmode': ['current', 'heating', 'mode'], 'onochbia': ['unoccupied', 'heating', 'bias'],", "'acreq': ['autocalibration', 'required'], 'acact': ['autocalibration', 'in', 'process'], 'effclg': ['effective', 'cooling', 'setpoint'], 'effhtg': ['effective',", "'analog', 'signal'], 'clgo': ['cooling', 'valve', 'analog', 'signal'], 'sfo': ['supply', 'fan', 'inlet', 'vane', 'vfd',", "'band'], 'spset': ['static', 'pressure', 'setpoint'], 'sprampst': ['static', 'pressure', 'output', 'ramp'], 'zpb': ['zone', 'proportional',", "\"ef\": [\"exhaust\", \"fan\"], \"df\": [\"discharge\", \"fan\"], \"of\": [\"outside\", \"fan\"], \"rf\": [\"return\", \"fan\"], \"sup\":", "'limit', 'proportional', 'band'], 'mall': ['mixed', 'air', 'lowlimit', 'setpoint'], 'ucncdf': ['unoccupied', 'cooling', 'differential'], 'ucncsp':", "\"temp\": [\"temperature\"], \"tmp\": [\"temperature\"], \"t\": [\"temperature\"], \"unocc\": [\"unoccupied\"], \"volt\": [\"voltage\"], \"ctl\": [\"control\"], \"cfm\":", "'command'], 'econs': ['economizer', 'status'], 'occs': ['occupied', 'status'], 'restasts': ['status', 'of', 'restart', 'delay'], 'sdwns':", "'sdwns': ['shutdown', 'status'], 'sfa': ['supply', 'fan', 'alarm'], 'boxhtgc': ['box', 'heating', 'command'], 'shtgc': ['supplemental',", "'znt': ['zone', 'space', 'temperature'], 'htgo': ['heating', 'valve', 'analog', 'signal'], 'clgo': ['cooling', 'valve', 'analog',", "'proportional', 'band'], 'depb': ['discharge', 'economizer', 'proportional', 'band'], 'dhdb': ['discharge', 'heating', 'dead', 'band'], 'dll':", "'occhbias': ['occupied', 'heating', 'bias'], 'stbyhbia': ['standby', 'heating', 'bias'], 'boxmode': ['current', 'box', 'mode'], 'htgmode':", "'cfm', 'setpoint'], 'commonsp': ['common', 'setpoint'], 'clgbias': ['active', 'cooling', 'bias'], 'cmaxflo': ['maximum', 'cooling', 'cfm',", "'setpoint'], 'unchmax': ['unoccupied', 'cooling', 'maximum', 'cfm', 'setpoint'], 'occcbias': ['occupied', 'cooling', 'bias'], 'stbycbia': ['standby',", "'command'], 'wcc': ['warmup', 'command'], 'econs': ['economizer', 'status'], 'occs': ['occupied', 'status'], 'restasts': ['status', 'of',", "'setpoint'], 'effhtg': ['effective', 'heating', 'setpoint'], 'htgvlv': ['heating', 'valve'], 'htgocc': ['occ', 'heating', 'setpoint'], 'htgoccminflow':", "[\"rooftop\"], \"dmp\": [\"damper\"], \"pos\": [\"position\"], \"sp\": [\"setpoint\"], \"spt\": [\"setpoint\"], \"stpt\": [\"setpoint\"], \"temp\": [\"temperature\"],", "'vfd', 'signal'], 'sfs': ['supply', 'fan', 'status'], 'rfs': ['return', 'fan', 'status'], 'smk': ['smoke', 'detector'],", "[\"supply\"], \"dis\": [\"discharge\"], \"ex\": [\"exhaust\"], \"ret\": [\"return\"], \"hw\": [\"hot\", \"water\"], \"chw\": [\"chilled\", \"water\"],", "[\"outside\", \"air\", \"temperature\"], \"rat\": [\"return\", \"air\", \"temperature\"], \"sap\": [\"supply\", \"air\", \"pressure\"], \"eap\": [\"exhaust\",", "[\"return\", \"fan\"], \"sup\": [\"supply\"], \"dis\": 
[\"discharge\"], \"ex\": [\"exhaust\"], \"ret\": [\"return\"], \"hw\": [\"hot\", \"water\"],", "'heating', 'bias'], 'stbyhbia': ['standby', 'heating', 'bias'], 'boxmode': ['current', 'box', 'mode'], 'htgmode': ['current', 'heating',", "['discharge', 'cooling', 'intergration'], 'dcpb': ['discharge', 'cooling', 'proportional', 'band'], 'depb': ['discharge', 'economizer', 'proportional', 'band'],", "['occupied', 'status'], 'restasts': ['status', 'of', 'restart', 'delay'], 'sdwns': ['shutdown', 'status'], 'sfa': ['supply', 'fan',", "\"box\"], \"fcu\": [\"fan\", \"coil\", \"unit\"], \"avg\": [\"average\"], \"cmd\": [\"command\"], \"elec\": [\"electrical\"], \"equip\": [\"equipment\"],", "['occupied', 'command'], 'sdwnc': ['shutdown', 'command'], 'wcc': ['warmup', 'command'], 'econs': ['economizer', 'status'], 'occs': ['occupied',", "'protection', 'enabled'], 'acreq': ['autocalibration', 'required'], 'acact': ['autocalibration', 'in', 'process'], 'effclg': ['effective', 'cooling', 'setpoint'],", "'setpoint'], 'ucmncsp': ['unoccupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'wmupmin': ['warmup', 'cfm', 'setpoint'], 'htgbias': ['active',", "'bias'], 'pkupgain': ['pickup', 'gain'], 'supflow': ['cfm', 'supply', 'flow'], 'supflosp': ['calculated', 'cfm', 'setpoint'], 'bhoutput':", "'heating', 'bias'], 'occhtgfl': ['occupied', 'heating', 'cfm', 'setpoint'], 'unchmax': ['unoccupied', 'cooling', 'maximum', 'cfm', 'setpoint'],", "['return', 'reset', 'band'], 'swovdif': ['economizer', 'switchover', 'differential'], 'dbswov': ['economizer', 'switchover', 'setpoint'], 'minpossp': ['economizer',", "'status'], 'rfs': ['return', 'fan', 'status'], 'smk': ['smoke', 'detector'], 'sds': ['smoke', 'detector'], 'dcdb': ['discharge',", "'occs': ['occupied', 'status'], 'restasts': ['status', 'of', 'restart', 'delay'], 'sdwns': ['shutdown', 'status'], 'sfa': ['supply',", "\"air\", \"temperature\"], \"dat\": [\"discharge\", \"air\", \"temperature\"], \"oat\": [\"outside\", \"air\", \"temperature\"], \"rat\": [\"return\", \"air\",", "'mallpb': ['mixed', 'air', 'low', 'limit', 'proportional', 'band'], 'mall': ['mixed', 'air', 'lowlimit', 'setpoint'], 'ucncdf':", "\"water\"], \"chw\": [\"chilled\", \"water\"], \"z\": [\"zone\"], \"zn\": [\"zone\"], 'mat': ['mixed', 'air', 'temperature'], 'wcadj':", "'status'], 'smk': ['smoke', 'detector'], 'sds': ['smoke', 'detector'], 'dcdb': ['discharge', 'cooling', 'deadband'], 'dcit': ['discharge',", "['discharge', 'cooling', 'proportional', 'band'], 'depb': ['discharge', 'economizer', 'proportional', 'band'], 'dhdb': ['discharge', 'heating', 'dead',", "'setpoint'], 'occc': ['occupied', 'command'], 'sdwnc': ['shutdown', 'command'], 'wcc': ['warmup', 'command'], 'econs': ['economizer', 'status'],", "['maximum', 'cooling', 'cfm', 'setpoint'], 'ocmncsp': ['occupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'ucmncsp': ['unoccupied', 'cooling',", "[\"supply\", \"air\"], \"ea\": [\"exhaust\", \"air\"], \"da\": [\"discharge\", \"air\"], \"oa\": [\"outside\", \"air\"], \"ra\": [\"return\",", "\"pressure\"], \"oap\": [\"outside\", \"air\", \"pressure\"], \"rap\": [\"return\", \"air\", \"pressure\"], \"sf\": [\"supply\", \"fan\"], \"ef\":", "['economizer', 'minimum', 'position'], 'malldb': ['mixed', 'air', 'low', 'limit', 'dead', 'band'], 'mallit': ['mixed', 'air',", "[\"frequency\"], \"occ\": [\"occupied\"], \"rtu\": [\"rootftop\", \"unit\"], \"roof\": [\"rooftop\"], \"dmp\": [\"damper\"], \"pos\": [\"position\"], \"sp\":", "'switchover', 'setpoint'], 'minpossp': ['economizer', 'minimum', 
'position'], 'malldb': ['mixed', 'air', 'low', 'limit', 'dead', 'band'],", "[\"outside\", \"air\"], \"ra\": [\"return\", \"air\"], \"sat\": [\"supply\", \"air\", \"temperature\"], \"eat\": [\"exhaust\", \"air\", \"temperature\"],", "'deadband'], 'dcit': ['discharge', 'cooling', 'intergration'], 'dcpb': ['discharge', 'cooling', 'proportional', 'band'], 'depb': ['discharge', 'economizer',", "'sds': ['smoke', 'detector'], 'dcdb': ['discharge', 'cooling', 'deadband'], 'dcit': ['discharge', 'cooling', 'intergration'], 'dcpb': ['discharge',", "['mixed', 'air', 'temperature'], 'wcadj': ['warm/cool', 'adjust'], 'sap': ['static', 'pressure'], 'znt': ['zone', 'space', 'temperature'],", "'wtrflush': ['water', 'flush', 'command'], 'boxelec': ['electrical', 'heat', 'protection', 'enabled'], 'acreq': ['autocalibration', 'required'], 'acact':", "\"cmd\": [\"command\"], \"elec\": [\"electrical\"], \"equip\": [\"equipment\"], \"freq\": [\"frequency\"], \"occ\": [\"occupied\"], \"rtu\": [\"rootftop\", \"unit\"],", "[\"outside\", \"air\", \"pressure\"], \"rap\": [\"return\", \"air\", \"pressure\"], \"sf\": [\"supply\", \"fan\"], \"ef\": [\"exhaust\", \"fan\"],", "\"avg\": [\"average\"], \"cmd\": [\"command\"], \"elec\": [\"electrical\"], \"equip\": [\"equipment\"], \"freq\": [\"frequency\"], \"occ\": [\"occupied\"], \"rtu\":", "'unchmax': ['unoccupied', 'cooling', 'maximum', 'cfm', 'setpoint'], 'occcbias': ['occupied', 'cooling', 'bias'], 'stbycbia': ['standby', 'cooling',", "'maximum', 'cfm', 'setpoint'], 'occcbias': ['occupied', 'cooling', 'bias'], 'stbycbia': ['standby', 'cooling', 'bias'], 'unoccbia': ['unoccupied',", "'heating', 'mode'], 'onochbia': ['unoccupied', 'heating', 'bias'], 'pkupgain': ['pickup', 'gain'], 'supflow': ['cfm', 'supply', 'flow'],", "\"unit\"], \"avg\": [\"average\"], \"cmd\": [\"command\"], \"elec\": [\"electrical\"], \"equip\": [\"equipment\"], \"freq\": [\"frequency\"], \"occ\": [\"occupied\"],", "[\"setpoint\"], \"temp\": [\"temperature\"], \"tmp\": [\"temperature\"], \"t\": [\"temperature\"], \"unocc\": [\"unoccupied\"], \"volt\": [\"voltage\"], \"ctl\": [\"control\"],", "'bias'], 'cmaxflo': ['maximum', 'cooling', 'cfm', 'setpoint'], 'ocmncsp': ['occupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'ucmncsp':", "[\"air\", \"handler\", \"unit\"], \"vav\": [\"variable\", \"volume\", \"box\"], \"fcu\": [\"fan\", \"coil\", \"unit\"], \"avg\": [\"average\"],", "'restasts': ['status', 'of', 'restart', 'delay'], 'sdwns': ['shutdown', 'status'], 'sfa': ['supply', 'fan', 'alarm'], 'boxhtgc':", "['mixed', 'air', 'low', 'limit', 'integration'], 'mallo': ['mixed', 'air', 'low', 'limit', 'offset'], 'mallpb': ['mixed',", "['supplemental', 'heating', 'command'], 'htgminfl': ['heating', 'minimum', 'cfm', 'setpoint'], 'commonsp': ['common', 'setpoint'], 'clgbias': ['active',", "\"sf\": [\"supply\", \"fan\"], \"ef\": [\"exhaust\", \"fan\"], \"df\": [\"discharge\", \"fan\"], \"of\": [\"outside\", \"fan\"], \"rf\":", "[\"exhaust\"], \"ret\": [\"return\"], \"hw\": [\"hot\", \"water\"], \"chw\": [\"chilled\", \"water\"], \"z\": [\"zone\"], \"zn\": [\"zone\"],", "\"rf\": [\"return\", \"fan\"], \"sup\": [\"supply\"], \"dis\": [\"discharge\"], \"ex\": [\"exhaust\"], \"ret\": [\"return\"], \"hw\": [\"hot\",", "'cooling', 'minimum', 'cfm', 'setpoint'], 'ucmncsp': ['unoccupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'wmupmin': ['warmup', 'cfm',", "'detector'], 'sds': ['smoke', 'detector'], 'dcdb': ['discharge', 'cooling', 'deadband'], 'dcit': ['discharge', 'cooling', 'intergration'], 'dcpb':", 
"'mallo': ['mixed', 'air', 'low', 'limit', 'offset'], 'mallpb': ['mixed', 'air', 'low', 'limit', 'proportional', 'band'],", "[\"zone\"], 'mat': ['mixed', 'air', 'temperature'], 'wcadj': ['warm/cool', 'adjust'], 'sap': ['static', 'pressure'], 'znt': ['zone',", "'bias'], 'stbyhbia': ['standby', 'heating', 'bias'], 'boxmode': ['current', 'box', 'mode'], 'htgmode': ['current', 'heating', 'mode'],", "'fan', 'status'], 'rfs': ['return', 'fan', 'status'], 'smk': ['smoke', 'detector'], 'sds': ['smoke', 'detector'], 'dcdb':", "'cooling', 'cfm', 'setpoint'], 'ocmncsp': ['occupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'ucmncsp': ['unoccupied', 'cooling', 'minimum',", "['discharge', 'economizer', 'proportional', 'band'], 'dhdb': ['discharge', 'heating', 'dead', 'band'], 'dll': ['discharge', 'low', 'limit'],", "\"sap\": [\"supply\", \"air\", \"pressure\"], \"eap\": [\"exhaust\", \"air\", \"pressure\"], \"dap\": [\"discharge\", \"air\", \"pressure\"], \"oap\":", "'htgocc': ['occ', 'heating', 'setpoint'], 'htgoccminflow': ['occ', 'heating', 'minimum', 'flow'], 'htgunocc': ['night', 'heating', 'setpoint'],", "\"fan\"], \"rf\": [\"return\", \"fan\"], \"sup\": [\"supply\"], \"dis\": [\"discharge\"], \"ex\": [\"exhaust\"], \"ret\": [\"return\"], \"hw\":", "\"unocc\": [\"unoccupied\"], \"volt\": [\"voltage\"], \"ctl\": [\"control\"], \"cfm\": [\"flow\"], \"sa\": [\"supply\", \"air\"], \"ea\": [\"exhaust\",", "'supflow': ['cfm', 'supply', 'flow'], 'supflosp': ['calculated', 'cfm', 'setpoint'], 'bhoutput': ['box', 'heat', 'output'], 'shoutput':", "['mixed', 'air', 'low', 'limit', 'offset'], 'mallpb': ['mixed', 'air', 'low', 'limit', 'proportional', 'band'], 'mall':", "'commonsp': ['common', 'setpoint'], 'clgbias': ['active', 'cooling', 'bias'], 'cmaxflo': ['maximum', 'cooling', 'cfm', 'setpoint'], 'ocmncsp':", "\"z\": [\"zone\"], \"zn\": [\"zone\"], 'mat': ['mixed', 'air', 'temperature'], 'wcadj': ['warm/cool', 'adjust'], 'sap': ['static',", "\"dmp\": [\"damper\"], \"pos\": [\"position\"], \"sp\": [\"setpoint\"], \"spt\": [\"setpoint\"], \"stpt\": [\"setpoint\"], \"temp\": [\"temperature\"], \"tmp\":", "'intergration'], 'dcpb': ['discharge', 'cooling', 'proportional', 'band'], 'depb': ['discharge', 'economizer', 'proportional', 'band'], 'dhdb': ['discharge',", "[\"electrical\"], \"equip\": [\"equipment\"], \"freq\": [\"frequency\"], \"occ\": [\"occupied\"], \"rtu\": [\"rootftop\", \"unit\"], \"roof\": [\"rooftop\"], \"dmp\":", "'cooling', 'deadband'], 'dcit': ['discharge', 'cooling', 'intergration'], 'dcpb': ['discharge', 'cooling', 'proportional', 'band'], 'depb': ['discharge',", "\"rat\": [\"return\", \"air\", \"temperature\"], \"sap\": [\"supply\", \"air\", \"pressure\"], \"eap\": [\"exhaust\", \"air\", \"pressure\"], \"dap\":", "[\"hot\", \"water\"], \"chw\": [\"chilled\", \"water\"], \"z\": [\"zone\"], \"zn\": [\"zone\"], 'mat': ['mixed', 'air', 'temperature'],", "[\"voltage\"], \"ctl\": [\"control\"], \"cfm\": [\"flow\"], \"sa\": [\"supply\", \"air\"], \"ea\": [\"exhaust\", \"air\"], \"da\": [\"discharge\",", "['current', 'box', 'mode'], 'htgmode': ['current', 'heating', 'mode'], 'onochbia': ['unoccupied', 'heating', 'bias'], 'pkupgain': ['pickup',", "'heat', 'output'], 'shoutput': ['supplemental', 'heat', 'output'], 'wtrflush': ['water', 'flush', 'command'], 'boxelec': ['electrical', 'heat',", "'differential'], 'dbswov': ['economizer', 'switchover', 'setpoint'], 'minpossp': ['economizer', 'minimum', 'position'], 'malldb': ['mixed', 'air', 'low',", "'low', 'limit', 'integration'], 'mallo': ['mixed', 
'air', 'low', 'limit', 'offset'], 'mallpb': ['mixed', 'air', 'low',", "[\"supply\", \"air\", \"temperature\"], \"eat\": [\"exhaust\", \"air\", \"temperature\"], \"dat\": [\"discharge\", \"air\", \"temperature\"], \"oat\": [\"outside\",", "\"air\"], \"ea\": [\"exhaust\", \"air\"], \"da\": [\"discharge\", \"air\"], \"oa\": [\"outside\", \"air\"], \"ra\": [\"return\", \"air\"],", "['unoccupied', 'cooling', 'setpoint'], 'ucnhdf': ['unoccupied', 'heating', 'differential'], 'ucnhsp': ['unoccupied', 'heating', 'setpoint'], 'spdb': ['static',", "'band'], 'dhdb': ['discharge', 'heating', 'dead', 'band'], 'dll': ['discharge', 'low', 'limit'], 'disrb': ['discharge', 'reset',", "'integration'], 'sppb': ['static', 'pressure', 'proportional', 'band'], 'spset': ['static', 'pressure', 'setpoint'], 'sprampst': ['static', 'pressure',", "'fan', 'alarm'], 'boxhtgc': ['box', 'heating', 'command'], 'shtgc': ['supplemental', 'heating', 'command'], 'htgminfl': ['heating', 'minimum',", "'bhoutput': ['box', 'heat', 'output'], 'shoutput': ['supplemental', 'heat', 'output'], 'wtrflush': ['water', 'flush', 'command'], 'boxelec':", "[\"exhaust\", \"fan\"], \"df\": [\"discharge\", \"fan\"], \"of\": [\"outside\", \"fan\"], \"rf\": [\"return\", \"fan\"], \"sup\": [\"supply\"],", "['discharge', 'reset', 'band'], 'rhl': ['return', 'air', 'high', 'limit'], 'retrb': ['return', 'reset', 'band'], 'swovdif':", "'ucnhsp': ['unoccupied', 'heating', 'setpoint'], 'spdb': ['static', 'pressure'], 'spdw': ['static', 'pressure', 'derivitive', 'weight'], 'spit':", "'integration'], 'mallo': ['mixed', 'air', 'low', 'limit', 'offset'], 'mallpb': ['mixed', 'air', 'low', 'limit', 'proportional',", "'setpoint'], 'spdb': ['static', 'pressure'], 'spdw': ['static', 'pressure', 'derivitive', 'weight'], 'spit': ['static', 'pressure', 'integration'],", "\"temperature\"], \"dat\": [\"discharge\", \"air\", \"temperature\"], \"oat\": [\"outside\", \"air\", \"temperature\"], \"rat\": [\"return\", \"air\", \"temperature\"],", "['occ', 'heating', 'setpoint'], 'htgoccminflow': ['occ', 'heating', 'minimum', 'flow'], 'htgunocc': ['night', 'heating', 'setpoint'], 'network':", "'occc': ['occupied', 'command'], 'sdwnc': ['shutdown', 'command'], 'wcc': ['warmup', 'command'], 'econs': ['economizer', 'status'], 'occs':", "[\"setpoint\"], \"stpt\": [\"setpoint\"], \"temp\": [\"temperature\"], \"tmp\": [\"temperature\"], \"t\": [\"temperature\"], \"unocc\": [\"unoccupied\"], \"volt\": [\"voltage\"],", "'pkupgain': ['pickup', 'gain'], 'supflow': ['cfm', 'supply', 'flow'], 'supflosp': ['calculated', 'cfm', 'setpoint'], 'bhoutput': ['box',", "'proportional', 'band'], 'spset': ['static', 'pressure', 'setpoint'], 'sprampst': ['static', 'pressure', 'output', 'ramp'], 'zpb': ['zone',", "'cooling', 'differential'], 'ucncsp': ['unoccupied', 'cooling', 'setpoint'], 'ucnhdf': ['unoccupied', 'heating', 'differential'], 'ucnhsp': ['unoccupied', 'heating',", "'minpossp': ['economizer', 'minimum', 'position'], 'malldb': ['mixed', 'air', 'low', 'limit', 'dead', 'band'], 'mallit': ['mixed',", "\"dat\": [\"discharge\", \"air\", \"temperature\"], \"oat\": [\"outside\", \"air\", \"temperature\"], \"rat\": [\"return\", \"air\", \"temperature\"], \"sap\":", "'clgo': ['cooling', 'valve', 'analog', 'signal'], 'sfo': ['supply', 'fan', 'inlet', 'vane', 'vfd', 'signal'], 'sfs':", "'htgmode': ['current', 'heating', 'mode'], 'onochbia': ['unoccupied', 'heating', 'bias'], 'pkupgain': ['pickup', 'gain'], 'supflow': ['cfm',", "['heating', 'valve'], 'htgocc': ['occ', 'heating', 'setpoint'], 
'htgoccminflow': ['occ', 'heating', 'minimum', 'flow'], 'htgunocc': ['night',", "'weight'], 'spit': ['static', 'pressure', 'integration'], 'sppb': ['static', 'pressure', 'proportional', 'band'], 'spset': ['static', 'pressure',", "'setpoint'], 'minpossp': ['economizer', 'minimum', 'position'], 'malldb': ['mixed', 'air', 'low', 'limit', 'dead', 'band'], 'mallit':", "'low', 'limit'], 'disrb': ['discharge', 'reset', 'band'], 'rhl': ['return', 'air', 'high', 'limit'], 'retrb': ['return',", "['occupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'ucmncsp': ['unoccupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'wmupmin': ['warmup',", "[\"discharge\", \"air\", \"pressure\"], \"oap\": [\"outside\", \"air\", \"pressure\"], \"rap\": [\"return\", \"air\", \"pressure\"], \"sf\": [\"supply\",", "'dhdb': ['discharge', 'heating', 'dead', 'band'], 'dll': ['discharge', 'low', 'limit'], 'disrb': ['discharge', 'reset', 'band'],", "['supply', 'fan', 'alarm'], 'boxhtgc': ['box', 'heating', 'command'], 'shtgc': ['supplemental', 'heating', 'command'], 'htgminfl': ['heating',", "'stbyhbia': ['standby', 'heating', 'bias'], 'boxmode': ['current', 'box', 'mode'], 'htgmode': ['current', 'heating', 'mode'], 'onochbia':", "['autocalibration', 'in', 'process'], 'effclg': ['effective', 'cooling', 'setpoint'], 'effhtg': ['effective', 'heating', 'setpoint'], 'htgvlv': ['heating',", "'cooling', 'proportional', 'band'], 'depb': ['discharge', 'economizer', 'proportional', 'band'], 'dhdb': ['discharge', 'heating', 'dead', 'band'],", "'of', 'restart', 'delay'], 'sdwns': ['shutdown', 'status'], 'sfa': ['supply', 'fan', 'alarm'], 'boxhtgc': ['box', 'heating',", "{ \"ahu\": [\"air\", \"handler\", \"unit\"], \"vav\": [\"variable\", \"volume\", \"box\"], \"fcu\": [\"fan\", \"coil\", \"unit\"],", "\"air\"], \"ra\": [\"return\", \"air\"], \"sat\": [\"supply\", \"air\", \"temperature\"], \"eat\": [\"exhaust\", \"air\", \"temperature\"], \"dat\":", "'low', 'limit', 'dead', 'band'], 'mallit': ['mixed', 'air', 'low', 'limit', 'integration'], 'mallo': ['mixed', 'air',", "'htgbias': ['active', 'heating', 'bias'], 'occhtgfl': ['occupied', 'heating', 'cfm', 'setpoint'], 'unchmax': ['unoccupied', 'cooling', 'maximum',", "'dead', 'band'], 'mallit': ['mixed', 'air', 'low', 'limit', 'integration'], 'mallo': ['mixed', 'air', 'low', 'limit',", "\"sup\": [\"supply\"], \"dis\": [\"discharge\"], \"ex\": [\"exhaust\"], \"ret\": [\"return\"], \"hw\": [\"hot\", \"water\"], \"chw\": [\"chilled\",", "'htgvlv': ['heating', 'valve'], 'htgocc': ['occ', 'heating', 'setpoint'], 'htgoccminflow': ['occ', 'heating', 'minimum', 'flow'], 'htgunocc':", "\"air\"], \"oa\": [\"outside\", \"air\"], \"ra\": [\"return\", \"air\"], \"sat\": [\"supply\", \"air\", \"temperature\"], \"eat\": [\"exhaust\",", "['active', 'heating', 'bias'], 'occhtgfl': ['occupied', 'heating', 'cfm', 'setpoint'], 'unchmax': ['unoccupied', 'cooling', 'maximum', 'cfm',", "'bias'], 'occhtgfl': ['occupied', 'heating', 'cfm', 'setpoint'], 'unchmax': ['unoccupied', 'cooling', 'maximum', 'cfm', 'setpoint'], 'occcbias':", "['cooling', 'valve', 'analog', 'signal'], 'sfo': ['supply', 'fan', 'inlet', 'vane', 'vfd', 'signal'], 'sfs': ['supply',", "'swovdif': ['economizer', 'switchover', 'differential'], 'dbswov': ['economizer', 'switchover', 'setpoint'], 'minpossp': ['economizer', 'minimum', 'position'], 'malldb':", "[\"discharge\", \"air\"], \"oa\": [\"outside\", \"air\"], \"ra\": [\"return\", \"air\"], \"sat\": [\"supply\", \"air\", \"temperature\"], \"eat\":", "\"handler\", \"unit\"], \"vav\": 
[\"variable\", \"volume\", \"box\"], \"fcu\": [\"fan\", \"coil\", \"unit\"], \"avg\": [\"average\"], \"cmd\":", "'spset': ['static', 'pressure', 'setpoint'], 'sprampst': ['static', 'pressure', 'output', 'ramp'], 'zpb': ['zone', 'proportional', 'band'],", "\"rap\": [\"return\", \"air\", \"pressure\"], \"sf\": [\"supply\", \"fan\"], \"ef\": [\"exhaust\", \"fan\"], \"df\": [\"discharge\", \"fan\"],", "'occcbias': ['occupied', 'cooling', 'bias'], 'stbycbia': ['standby', 'cooling', 'bias'], 'unoccbia': ['unoccupied', 'cooling', 'bias'], 'occhbias':", "'fan', 'status'], 'smk': ['smoke', 'detector'], 'sds': ['smoke', 'detector'], 'dcdb': ['discharge', 'cooling', 'deadband'], 'dcit':", "['smoke', 'detector'], 'dcdb': ['discharge', 'cooling', 'deadband'], 'dcit': ['discharge', 'cooling', 'intergration'], 'dcpb': ['discharge', 'cooling',", "'spdw': ['static', 'pressure', 'derivitive', 'weight'], 'spit': ['static', 'pressure', 'integration'], 'sppb': ['static', 'pressure', 'proportional',", "'pressure'], 'spdw': ['static', 'pressure', 'derivitive', 'weight'], 'spit': ['static', 'pressure', 'integration'], 'sppb': ['static', 'pressure',", "'cooling', 'maximum', 'cfm', 'setpoint'], 'occcbias': ['occupied', 'cooling', 'bias'], 'stbycbia': ['standby', 'cooling', 'bias'], 'unoccbia':", "'pressure', 'derivitive', 'weight'], 'spit': ['static', 'pressure', 'integration'], 'sppb': ['static', 'pressure', 'proportional', 'band'], 'spset':", "\"air\", \"pressure\"], \"rap\": [\"return\", \"air\", \"pressure\"], \"sf\": [\"supply\", \"fan\"], \"ef\": [\"exhaust\", \"fan\"], \"df\":", "\"eap\": [\"exhaust\", \"air\", \"pressure\"], \"dap\": [\"discharge\", \"air\", \"pressure\"], \"oap\": [\"outside\", \"air\", \"pressure\"], \"rap\":", "\"chw\": [\"chilled\", \"water\"], \"z\": [\"zone\"], \"zn\": [\"zone\"], 'mat': ['mixed', 'air', 'temperature'], 'wcadj': ['warm/cool',", "[\"position\"], \"sp\": [\"setpoint\"], \"spt\": [\"setpoint\"], \"stpt\": [\"setpoint\"], \"temp\": [\"temperature\"], \"tmp\": [\"temperature\"], \"t\": [\"temperature\"],", "[\"chilled\", \"water\"], \"z\": [\"zone\"], \"zn\": [\"zone\"], 'mat': ['mixed', 'air', 'temperature'], 'wcadj': ['warm/cool', 'adjust'],", "['standby', 'cooling', 'bias'], 'unoccbia': ['unoccupied', 'cooling', 'bias'], 'occhbias': ['occupied', 'heating', 'bias'], 'stbyhbia': ['standby',", "'band'], 'dll': ['discharge', 'low', 'limit'], 'disrb': ['discharge', 'reset', 'band'], 'rhl': ['return', 'air', 'high',", "'minimum', 'flow'], 'htgunocc': ['night', 'heating', 'setpoint'], 'network': ['network', 'setpoint'], 'occsched': ['occupancy', 'schedule'], 'saf':", "\"temperature\"], \"sap\": [\"supply\", \"air\", \"pressure\"], \"eap\": [\"exhaust\", \"air\", \"pressure\"], \"dap\": [\"discharge\", \"air\", \"pressure\"],", "['zone', 'space', 'temperature'], 'htgo': ['heating', 'valve', 'analog', 'signal'], 'clgo': ['cooling', 'valve', 'analog', 'signal'],", "'differential'], 'ucncsp': ['unoccupied', 'cooling', 'setpoint'], 'ucnhdf': ['unoccupied', 'heating', 'differential'], 'ucnhsp': ['unoccupied', 'heating', 'setpoint'],", "'economizer', 'proportional', 'band'], 'dhdb': ['discharge', 'heating', 'dead', 'band'], 'dll': ['discharge', 'low', 'limit'], 'disrb':", "[\"exhaust\", \"air\"], \"da\": [\"discharge\", \"air\"], \"oa\": [\"outside\", \"air\"], \"ra\": [\"return\", \"air\"], \"sat\": [\"supply\",", "'ucncsp': ['unoccupied', 'cooling', 'setpoint'], 'ucnhdf': ['unoccupied', 'heating', 'differential'], 'ucnhsp': ['unoccupied', 'heating', 'setpoint'], 'spdb':", 
"'command'], 'boxelec': ['electrical', 'heat', 'protection', 'enabled'], 'acreq': ['autocalibration', 'required'], 'acact': ['autocalibration', 'in', 'process'],", "['static', 'pressure', 'output', 'ramp'], 'zpb': ['zone', 'proportional', 'band'], 'zntsp': ['zone', 'temperature', 'setpoint'], 'ahusp':", "'flow'], 'supflosp': ['calculated', 'cfm', 'setpoint'], 'bhoutput': ['box', 'heat', 'output'], 'shoutput': ['supplemental', 'heat', 'output'],", "['occupied', 'cooling', 'bias'], 'stbycbia': ['standby', 'cooling', 'bias'], 'unoccbia': ['unoccupied', 'cooling', 'bias'], 'occhbias': ['occupied',", "['mixed', 'air', 'low', 'limit', 'proportional', 'band'], 'mall': ['mixed', 'air', 'lowlimit', 'setpoint'], 'ucncdf': ['unoccupied',", "[\"return\", \"air\", \"pressure\"], \"sf\": [\"supply\", \"fan\"], \"ef\": [\"exhaust\", \"fan\"], \"df\": [\"discharge\", \"fan\"], \"of\":", "'setpoint'], 'ocmncsp': ['occupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'ucmncsp': ['unoccupied', 'cooling', 'minimum', 'cfm', 'setpoint'],", "'inlet', 'vane', 'vfd', 'signal'], 'sfs': ['supply', 'fan', 'status'], 'rfs': ['return', 'fan', 'status'], 'smk':", "\"water\"], \"z\": [\"zone\"], \"zn\": [\"zone\"], 'mat': ['mixed', 'air', 'temperature'], 'wcadj': ['warm/cool', 'adjust'], 'sap':", "['box', 'heat', 'output'], 'shoutput': ['supplemental', 'heat', 'output'], 'wtrflush': ['water', 'flush', 'command'], 'boxelec': ['electrical',", "'heating', 'minimum', 'flow'], 'htgunocc': ['night', 'heating', 'setpoint'], 'network': ['network', 'setpoint'], 'occsched': ['occupancy', 'schedule'],", "'retrb': ['return', 'reset', 'band'], 'swovdif': ['economizer', 'switchover', 'differential'], 'dbswov': ['economizer', 'switchover', 'setpoint'], 'minpossp':", "'discharge', 'setpoint'], 'occc': ['occupied', 'command'], 'sdwnc': ['shutdown', 'command'], 'wcc': ['warmup', 'command'], 'econs': ['economizer',", "'limit', 'dead', 'band'], 'mallit': ['mixed', 'air', 'low', 'limit', 'integration'], 'mallo': ['mixed', 'air', 'low',", "'air', 'lowlimit', 'setpoint'], 'ucncdf': ['unoccupied', 'cooling', 'differential'], 'ucncsp': ['unoccupied', 'cooling', 'setpoint'], 'ucnhdf': ['unoccupied',", "\"unit\"], \"vav\": [\"variable\", \"volume\", \"box\"], \"fcu\": [\"fan\", \"coil\", \"unit\"], \"avg\": [\"average\"], \"cmd\": [\"command\"],", "['status', 'of', 'restart', 'delay'], 'sdwns': ['shutdown', 'status'], 'sfa': ['supply', 'fan', 'alarm'], 'boxhtgc': ['box',", "['effective', 'heating', 'setpoint'], 'htgvlv': ['heating', 'valve'], 'htgocc': ['occ', 'heating', 'setpoint'], 'htgoccminflow': ['occ', 'heating',", "'pressure', 'output', 'ramp'], 'zpb': ['zone', 'proportional', 'band'], 'zntsp': ['zone', 'temperature', 'setpoint'], 'ahusp': ['calculated',", "[\"discharge\", \"fan\"], \"of\": [\"outside\", \"fan\"], \"rf\": [\"return\", \"fan\"], \"sup\": [\"supply\"], \"dis\": [\"discharge\"], \"ex\":", "\"fan\"], \"of\": [\"outside\", \"fan\"], \"rf\": [\"return\", \"fan\"], \"sup\": [\"supply\"], \"dis\": [\"discharge\"], \"ex\": [\"exhaust\"],", "['return', 'fan', 'status'], 'smk': ['smoke', 'detector'], 'sds': ['smoke', 'detector'], 'dcdb': ['discharge', 'cooling', 'deadband'],", "['water', 'flush', 'command'], 'boxelec': ['electrical', 'heat', 'protection', 'enabled'], 'acreq': ['autocalibration', 'required'], 'acact': ['autocalibration',", "\"occ\": [\"occupied\"], \"rtu\": [\"rootftop\", \"unit\"], \"roof\": [\"rooftop\"], \"dmp\": [\"damper\"], \"pos\": [\"position\"], \"sp\": [\"setpoint\"],", "[\"return\"], \"hw\": [\"hot\", 
\"water\"], \"chw\": [\"chilled\", \"water\"], \"z\": [\"zone\"], \"zn\": [\"zone\"], 'mat': ['mixed',", "['static', 'pressure', 'integration'], 'sppb': ['static', 'pressure', 'proportional', 'band'], 'spset': ['static', 'pressure', 'setpoint'], 'sprampst':", "['smoke', 'detector'], 'sds': ['smoke', 'detector'], 'dcdb': ['discharge', 'cooling', 'deadband'], 'dcit': ['discharge', 'cooling', 'intergration'],", "'high', 'limit'], 'retrb': ['return', 'reset', 'band'], 'swovdif': ['economizer', 'switchover', 'differential'], 'dbswov': ['economizer', 'switchover',", "'setpoint'], 'network': ['network', 'setpoint'], 'occsched': ['occupancy', 'schedule'], 'saf': ['supply', 'airflow', 'rate'], 'vp': ['velocity',", "'heating', 'dead', 'band'], 'dll': ['discharge', 'low', 'limit'], 'disrb': ['discharge', 'reset', 'band'], 'rhl': ['return',", "'htgunocc': ['night', 'heating', 'setpoint'], 'network': ['network', 'setpoint'], 'occsched': ['occupancy', 'schedule'], 'saf': ['supply', 'airflow',", "'heating', 'command'], 'shtgc': ['supplemental', 'heating', 'command'], 'htgminfl': ['heating', 'minimum', 'cfm', 'setpoint'], 'commonsp': ['common',", "'signal'], 'sfo': ['supply', 'fan', 'inlet', 'vane', 'vfd', 'signal'], 'sfs': ['supply', 'fan', 'status'], 'rfs':", "[\"return\", \"air\"], \"sat\": [\"supply\", \"air\", \"temperature\"], \"eat\": [\"exhaust\", \"air\", \"temperature\"], \"dat\": [\"discharge\", \"air\",", "[\"control\"], \"cfm\": [\"flow\"], \"sa\": [\"supply\", \"air\"], \"ea\": [\"exhaust\", \"air\"], \"da\": [\"discharge\", \"air\"], \"oa\":", "'heating', 'setpoint'], 'htgoccminflow': ['occ', 'heating', 'minimum', 'flow'], 'htgunocc': ['night', 'heating', 'setpoint'], 'network': ['network',", "\"air\", \"temperature\"], \"eat\": [\"exhaust\", \"air\", \"temperature\"], \"dat\": [\"discharge\", \"air\", \"temperature\"], \"oat\": [\"outside\", \"air\",", "['heating', 'valve', 'analog', 'signal'], 'clgo': ['cooling', 'valve', 'analog', 'signal'], 'sfo': ['supply', 'fan', 'inlet',", "'sfs': ['supply', 'fan', 'status'], 'rfs': ['return', 'fan', 'status'], 'smk': ['smoke', 'detector'], 'sds': ['smoke',", "'mallit': ['mixed', 'air', 'low', 'limit', 'integration'], 'mallo': ['mixed', 'air', 'low', 'limit', 'offset'], 'mallpb':", "['calculated', 'discharge', 'setpoint'], 'occc': ['occupied', 'command'], 'sdwnc': ['shutdown', 'command'], 'wcc': ['warmup', 'command'], 'econs':", "\"fan\"], \"ef\": [\"exhaust\", \"fan\"], \"df\": [\"discharge\", \"fan\"], \"of\": [\"outside\", \"fan\"], \"rf\": [\"return\", \"fan\"],", "'limit', 'integration'], 'mallo': ['mixed', 'air', 'low', 'limit', 'offset'], 'mallpb': ['mixed', 'air', 'low', 'limit',", "'mode'], 'htgmode': ['current', 'heating', 'mode'], 'onochbia': ['unoccupied', 'heating', 'bias'], 'pkupgain': ['pickup', 'gain'], 'supflow':", "'band'], 'zntsp': ['zone', 'temperature', 'setpoint'], 'ahusp': ['calculated', 'discharge', 'setpoint'], 'occc': ['occupied', 'command'], 'sdwnc':", "\"unit\"], \"roof\": [\"rooftop\"], \"dmp\": [\"damper\"], \"pos\": [\"position\"], \"sp\": [\"setpoint\"], \"spt\": [\"setpoint\"], \"stpt\": [\"setpoint\"],", "'command'], 'htgminfl': ['heating', 'minimum', 'cfm', 'setpoint'], 'commonsp': ['common', 'setpoint'], 'clgbias': ['active', 'cooling', 'bias'],", "['zone', 'temperature', 'setpoint'], 'ahusp': ['calculated', 'discharge', 'setpoint'], 'occc': ['occupied', 'command'], 'sdwnc': ['shutdown', 'command'],", "\"volume\", \"box\"], \"fcu\": [\"fan\", \"coil\", \"unit\"], \"avg\": [\"average\"], \"cmd\": [\"command\"], 
\"elec\": [\"electrical\"], \"equip\":", "'sprampst': ['static', 'pressure', 'output', 'ramp'], 'zpb': ['zone', 'proportional', 'band'], 'zntsp': ['zone', 'temperature', 'setpoint'],", "'cfm', 'setpoint'], 'wmupmin': ['warmup', 'cfm', 'setpoint'], 'htgbias': ['active', 'heating', 'bias'], 'occhtgfl': ['occupied', 'heating',", "'enabled'], 'acreq': ['autocalibration', 'required'], 'acact': ['autocalibration', 'in', 'process'], 'effclg': ['effective', 'cooling', 'setpoint'], 'effhtg':", "'pressure', 'integration'], 'sppb': ['static', 'pressure', 'proportional', 'band'], 'spset': ['static', 'pressure', 'setpoint'], 'sprampst': ['static',", "'low', 'limit', 'offset'], 'mallpb': ['mixed', 'air', 'low', 'limit', 'proportional', 'band'], 'mall': ['mixed', 'air',", "'occsched': ['occupancy', 'schedule'], 'saf': ['supply', 'airflow', 'rate'], 'vp': ['velocity', 'pressure'], 'clgocc': ['occ', 'cooling'],", "\"equip\": [\"equipment\"], \"freq\": [\"frequency\"], \"occ\": [\"occupied\"], \"rtu\": [\"rootftop\", \"unit\"], \"roof\": [\"rooftop\"], \"dmp\": [\"damper\"],", "\"air\", \"temperature\"], \"sap\": [\"supply\", \"air\", \"pressure\"], \"eap\": [\"exhaust\", \"air\", \"pressure\"], \"dap\": [\"discharge\", \"air\",", "'ahusp': ['calculated', 'discharge', 'setpoint'], 'occc': ['occupied', 'command'], 'sdwnc': ['shutdown', 'command'], 'wcc': ['warmup', 'command'],", "'differential'], 'ucnhsp': ['unoccupied', 'heating', 'setpoint'], 'spdb': ['static', 'pressure'], 'spdw': ['static', 'pressure', 'derivitive', 'weight'],", "[\"supply\", \"fan\"], \"ef\": [\"exhaust\", \"fan\"], \"df\": [\"discharge\", \"fan\"], \"of\": [\"outside\", \"fan\"], \"rf\": [\"return\",", "'bias'], 'occhbias': ['occupied', 'heating', 'bias'], 'stbyhbia': ['standby', 'heating', 'bias'], 'boxmode': ['current', 'box', 'mode'],", "'cfm', 'setpoint'], 'ocmncsp': ['occupied', 'cooling', 'minimum', 'cfm', 'setpoint'], 'ucmncsp': ['unoccupied', 'cooling', 'minimum', 'cfm',", "'adjust'], 'sap': ['static', 'pressure'], 'znt': ['zone', 'space', 'temperature'], 'htgo': ['heating', 'valve', 'analog', 'signal'],", "'air', 'temperature'], 'wcadj': ['warm/cool', 'adjust'], 'sap': ['static', 'pressure'], 'znt': ['zone', 'space', 'temperature'], 'htgo':", "\"air\"], \"da\": [\"discharge\", \"air\"], \"oa\": [\"outside\", \"air\"], \"ra\": [\"return\", \"air\"], \"sat\": [\"supply\", \"air\",", "'wcc': ['warmup', 'command'], 'econs': ['economizer', 'status'], 'occs': ['occupied', 'status'], 'restasts': ['status', 'of', 'restart',", "\"of\": [\"outside\", \"fan\"], \"rf\": [\"return\", \"fan\"], \"sup\": [\"supply\"], \"dis\": [\"discharge\"], \"ex\": [\"exhaust\"], \"ret\":", "'flow'], 'htgunocc': ['night', 'heating', 'setpoint'], 'network': ['network', 'setpoint'], 'occsched': ['occupancy', 'schedule'], 'saf': ['supply',", "'spdb': ['static', 'pressure'], 'spdw': ['static', 'pressure', 'derivitive', 'weight'], 'spit': ['static', 'pressure', 'integration'], 'sppb':", "'cooling', 'setpoint'], 'ucnhdf': ['unoccupied', 'heating', 'differential'], 'ucnhsp': ['unoccupied', 'heating', 'setpoint'], 'spdb': ['static', 'pressure'],", "[\"variable\", \"volume\", \"box\"], \"fcu\": [\"fan\", \"coil\", \"unit\"], \"avg\": [\"average\"], \"cmd\": [\"command\"], \"elec\": [\"electrical\"],", "['zone', 'proportional', 'band'], 'zntsp': ['zone', 'temperature', 'setpoint'], 'ahusp': ['calculated', 'discharge', 'setpoint'], 'occc': ['occupied',", "'detector'], 'dcdb': ['discharge', 'cooling', 'deadband'], 'dcit': ['discharge', 'cooling', 'intergration'], 
'dcpb': ['discharge', 'cooling', 'proportional',", "'band'], 'mall': ['mixed', 'air', 'lowlimit', 'setpoint'], 'ucncdf': ['unoccupied', 'cooling', 'differential'], 'ucncsp': ['unoccupied', 'cooling',", "\"air\"], \"sat\": [\"supply\", \"air\", \"temperature\"], \"eat\": [\"exhaust\", \"air\", \"temperature\"], \"dat\": [\"discharge\", \"air\", \"temperature\"],", "'setpoint'], 'htgoccminflow': ['occ', 'heating', 'minimum', 'flow'], 'htgunocc': ['night', 'heating', 'setpoint'], 'network': ['network', 'setpoint'],", "'effhtg': ['effective', 'heating', 'setpoint'], 'htgvlv': ['heating', 'valve'], 'htgocc': ['occ', 'heating', 'setpoint'], 'htgoccminflow': ['occ',", "'heating', 'setpoint'], 'network': ['network', 'setpoint'], 'occsched': ['occupancy', 'schedule'], 'saf': ['supply', 'airflow', 'rate'], 'vp':", "['discharge', 'low', 'limit'], 'disrb': ['discharge', 'reset', 'band'], 'rhl': ['return', 'air', 'high', 'limit'], 'retrb':", "\"zn\": [\"zone\"], 'mat': ['mixed', 'air', 'temperature'], 'wcadj': ['warm/cool', 'adjust'], 'sap': ['static', 'pressure'], 'znt':", "'mode'], 'onochbia': ['unoccupied', 'heating', 'bias'], 'pkupgain': ['pickup', 'gain'], 'supflow': ['cfm', 'supply', 'flow'], 'supflosp':", "'cooling', 'bias'], 'unoccbia': ['unoccupied', 'cooling', 'bias'], 'occhbias': ['occupied', 'heating', 'bias'], 'stbyhbia': ['standby', 'heating',", "'cooling', 'bias'], 'cmaxflo': ['maximum', 'cooling', 'cfm', 'setpoint'], 'ocmncsp': ['occupied', 'cooling', 'minimum', 'cfm', 'setpoint'],", "'limit', 'offset'], 'mallpb': ['mixed', 'air', 'low', 'limit', 'proportional', 'band'], 'mall': ['mixed', 'air', 'lowlimit',", "'cooling', 'setpoint'], 'effhtg': ['effective', 'heating', 'setpoint'], 'htgvlv': ['heating', 'valve'], 'htgocc': ['occ', 'heating', 'setpoint'],", "'sap': ['static', 'pressure'], 'znt': ['zone', 'space', 'temperature'], 'htgo': ['heating', 'valve', 'analog', 'signal'], 'clgo':", "'dead', 'band'], 'dll': ['discharge', 'low', 'limit'], 'disrb': ['discharge', 'reset', 'band'], 'rhl': ['return', 'air',", "'setpoint'], 'commonsp': ['common', 'setpoint'], 'clgbias': ['active', 'cooling', 'bias'], 'cmaxflo': ['maximum', 'cooling', 'cfm', 'setpoint'],", "'smk': ['smoke', 'detector'], 'sds': ['smoke', 'detector'], 'dcdb': ['discharge', 'cooling', 'deadband'], 'dcit': ['discharge', 'cooling',", "'clgbias': ['active', 'cooling', 'bias'], 'cmaxflo': ['maximum', 'cooling', 'cfm', 'setpoint'], 'ocmncsp': ['occupied', 'cooling', 'minimum',", "\"oap\": [\"outside\", \"air\", \"pressure\"], \"rap\": [\"return\", \"air\", \"pressure\"], \"sf\": [\"supply\", \"fan\"], \"ef\": [\"exhaust\",", "['occupied', 'heating', 'bias'], 'stbyhbia': ['standby', 'heating', 'bias'], 'boxmode': ['current', 'box', 'mode'], 'htgmode': ['current',", "'zntsp': ['zone', 'temperature', 'setpoint'], 'ahusp': ['calculated', 'discharge', 'setpoint'], 'occc': ['occupied', 'command'], 'sdwnc': ['shutdown',", "'status'], 'restasts': ['status', 'of', 'restart', 'delay'], 'sdwns': ['shutdown', 'status'], 'sfa': ['supply', 'fan', 'alarm'],", "\"temperature\"], \"rat\": [\"return\", \"air\", \"temperature\"], \"sap\": [\"supply\", \"air\", \"pressure\"], \"eap\": [\"exhaust\", \"air\", \"pressure\"],", "\"pressure\"], \"eap\": [\"exhaust\", \"air\", \"pressure\"], \"dap\": [\"discharge\", \"air\", \"pressure\"], \"oap\": [\"outside\", \"air\", \"pressure\"],", "'pressure', 'proportional', 'band'], 'spset': ['static', 'pressure', 'setpoint'], 'sprampst': ['static', 'pressure', 'output', 'ramp'], 'zpb':", "\"vav\": [\"variable\", 
\"volume\", \"box\"], \"fcu\": [\"fan\", \"coil\", \"unit\"], \"avg\": [\"average\"], \"cmd\": [\"command\"], \"elec\":", "'cooling', 'bias'], 'stbycbia': ['standby', 'cooling', 'bias'], 'unoccbia': ['unoccupied', 'cooling', 'bias'], 'occhbias': ['occupied', 'heating',", "['return', 'air', 'high', 'limit'], 'retrb': ['return', 'reset', 'band'], 'swovdif': ['economizer', 'switchover', 'differential'], 'dbswov':", "[\"exhaust\", \"air\", \"temperature\"], \"dat\": [\"discharge\", \"air\", \"temperature\"], \"oat\": [\"outside\", \"air\", \"temperature\"], \"rat\": [\"return\",", "'alarm'], 'boxhtgc': ['box', 'heating', 'command'], 'shtgc': ['supplemental', 'heating', 'command'], 'htgminfl': ['heating', 'minimum', 'cfm',", "\"temperature\"], \"oat\": [\"outside\", \"air\", \"temperature\"], \"rat\": [\"return\", \"air\", \"temperature\"], \"sap\": [\"supply\", \"air\", \"pressure\"],", "'rfs': ['return', 'fan', 'status'], 'smk': ['smoke', 'detector'], 'sds': ['smoke', 'detector'], 'dcdb': ['discharge', 'cooling',", "'band'], 'mallit': ['mixed', 'air', 'low', 'limit', 'integration'], 'mallo': ['mixed', 'air', 'low', 'limit', 'offset'],", "'htgminfl': ['heating', 'minimum', 'cfm', 'setpoint'], 'commonsp': ['common', 'setpoint'], 'clgbias': ['active', 'cooling', 'bias'], 'cmaxflo':", "'shoutput': ['supplemental', 'heat', 'output'], 'wtrflush': ['water', 'flush', 'command'], 'boxelec': ['electrical', 'heat', 'protection', 'enabled'],", "['occ', 'heating', 'minimum', 'flow'], 'htgunocc': ['night', 'heating', 'setpoint'], 'network': ['network', 'setpoint'], 'occsched': ['occupancy',", "'setpoint'], 'ucnhdf': ['unoccupied', 'heating', 'differential'], 'ucnhsp': ['unoccupied', 'heating', 'setpoint'], 'spdb': ['static', 'pressure'], 'spdw':", "'setpoint'], 'occcbias': ['occupied', 'cooling', 'bias'], 'stbycbia': ['standby', 'cooling', 'bias'], 'unoccbia': ['unoccupied', 'cooling', 'bias'],", "'position'], 'malldb': ['mixed', 'air', 'low', 'limit', 'dead', 'band'], 'mallit': ['mixed', 'air', 'low', 'limit',", "[\"exhaust\", \"air\", \"pressure\"], \"dap\": [\"discharge\", \"air\", \"pressure\"], \"oap\": [\"outside\", \"air\", \"pressure\"], \"rap\": [\"return\",", "'setpoint'], 'bhoutput': ['box', 'heat', 'output'], 'shoutput': ['supplemental', 'heat', 'output'], 'wtrflush': ['water', 'flush', 'command'],", "['static', 'pressure'], 'spdw': ['static', 'pressure', 'derivitive', 'weight'], 'spit': ['static', 'pressure', 'integration'], 'sppb': ['static',", "'status'], 'occs': ['occupied', 'status'], 'restasts': ['status', 'of', 'restart', 'delay'], 'sdwns': ['shutdown', 'status'], 'sfa':", "'bias'], 'unoccbia': ['unoccupied', 'cooling', 'bias'], 'occhbias': ['occupied', 'heating', 'bias'], 'stbyhbia': ['standby', 'heating', 'bias'],", "['unoccupied', 'cooling', 'differential'], 'ucncsp': ['unoccupied', 'cooling', 'setpoint'], 'ucnhdf': ['unoccupied', 'heating', 'differential'], 'ucnhsp': ['unoccupied',", "\"oa\": [\"outside\", \"air\"], \"ra\": [\"return\", \"air\"], \"sat\": [\"supply\", \"air\", \"temperature\"], \"eat\": [\"exhaust\", \"air\",", "\"fan\"], \"df\": [\"discharge\", \"fan\"], \"of\": [\"outside\", \"fan\"], \"rf\": [\"return\", \"fan\"], \"sup\": [\"supply\"], \"dis\":", "\"pos\": [\"position\"], \"sp\": [\"setpoint\"], \"spt\": [\"setpoint\"], \"stpt\": [\"setpoint\"], \"temp\": [\"temperature\"], \"tmp\": [\"temperature\"], \"t\":", "\"temperature\"], \"eat\": [\"exhaust\", \"air\", \"temperature\"], \"dat\": [\"discharge\", \"air\", \"temperature\"], \"oat\": [\"outside\", \"air\", 
\"temperature\"],", "'status'], 'sfa': ['supply', 'fan', 'alarm'], 'boxhtgc': ['box', 'heating', 'command'], 'shtgc': ['supplemental', 'heating', 'command'],", "'band'], 'swovdif': ['economizer', 'switchover', 'differential'], 'dbswov': ['economizer', 'switchover', 'setpoint'], 'minpossp': ['economizer', 'minimum', 'position']," ]
[ "docs_counter = 0 terms_counter = 0 entries_counter = 0 for line in open(vw_file_name,", "cnt except BaseException: bow[wid] = cnt for key, value in sorted(bow.items()): temp_docword_file.write(\"%d %d", "if self.cur_doc_id % 1000 == 0: self.log(str(self.cur_doc_id)) word, modality = self.vocab[int(parsed[1])] count =", "uci2vw(docword_file_name, vocab_file_name, vw_file_name, logger=None): uci = UciReader(docword_file_name, vocab_file_name, logger=logger) uci.save_vw(vw_file_name) def vw2uci(vw_file_name, docword_file_name,", "{} def save_vw(self, output_file): self.out = open(output_file, \"w\", encoding='utf-8') self.cur_doc_id = 1 self.bow", "in tokens[1:]: if token[0] == '|': current_modality = token[1:] else: parsed = token.split(':')", "= 0 entries_counter = 0 for line in open(vw_file_name, encoding='utf-8'): docs_counter += 1", "open(vocab_file, \"r\", encoding=\"utf-8\") as f: i = 1 for line in f: line", "+ \" \" + current_modality try: wid = vocab[word] except BaseException: vocab_file.write(word +", "0 entries_counter = 0 for line in open(vw_file_name, encoding='utf-8'): docs_counter += 1 tokens", "line in open(vw_file_name, encoding='utf-8'): docs_counter += 1 tokens = line.split() current_modality = \"@default_class\"", "= int(parsed[1]) except BaseException: cnt = 1 word = parsed[0] + \" \"", "(modality, string)) self.out.write(\"\\n\") # if self.cur_doc_id % 100 == 0: # print(self.cur_doc_id) self.bow", "string)) self.out.write(\"\\n\") # if self.cur_doc_id % 100 == 0: # print(self.cur_doc_id) self.bow =", "%d %d\\n\" % (docs_counter, key, value)) entries_counter += 1 temp_docword_file.close() vocab_file.close() with open(docword_file_name,", "% (docs_counter, key, value)) entries_counter += 1 temp_docword_file.close() vocab_file.close() with open(docword_file_name, \"w\") as", "tokens = line.split() current_modality = \"@default_class\" bow = dict() for token in tokens[1:]:", "vocab_file_name): vocab = dict() temp_docword_file_name = os.path.join( os.path.dirname(vw_file_name), \"temp.txt\") temp_docword_file = open(temp_docword_file_name, \"w\")", "write + ' ' self.write_doc() self.out.close() def log(self, s): if self.logger: self.logger.log(s) def", "\"\\n\") terms_counter += 1 vocab[word] = terms_counter wid = terms_counter try: bow[wid] +=", "line.replace('\\n', '') parsed = line.split() if len(parsed) == 2: self.vocab[i] = parsed else:", "vocab[word] = terms_counter wid = terms_counter try: bow[wid] += cnt except BaseException: bow[wid]", "cnt for key, value in sorted(bow.items()): temp_docword_file.write(\"%d %d %d\\n\" % (docs_counter, key, value))", "self.logger = logger self.log(\"Start UciReader...\") self.vocab = dict() with open(vocab_file, \"r\", encoding=\"utf-8\") as", "f: parsed = line.split() if len(parsed) != 3: continue doc_id = int(parsed[0]) if", "vocab_file_name, vw_file_name, logger=None): uci = UciReader(docword_file_name, vocab_file_name, logger=logger) uci.save_vw(vw_file_name) def vw2uci(vw_file_name, docword_file_name, vocab_file_name):", "= token[1:] else: parsed = token.split(':') try: cnt = int(parsed[1]) except BaseException: cnt", "self.bow = {} def save_vw(self, output_file): self.out = open(output_file, \"w\", encoding='utf-8') self.cur_doc_id =", "logger=None): uci = UciReader(docword_file_name, vocab_file_name, logger=logger) uci.save_vw(vw_file_name) def vw2uci(vw_file_name, docword_file_name, vocab_file_name): vocab =", "try: wid = vocab[word] except BaseException: vocab_file.write(word + \"\\n\") terms_counter += 1 vocab[word]", 
"if self.cur_doc_id % 100 == 0: # print(self.cur_doc_id) self.bow = {} def save_vw(self,", "f: line = line.replace('\\n', '') parsed = line.split() if len(parsed) == 2: self.vocab[i]", "encoding=\"utf-8\") as f: for line in f: parsed = line.split() if len(parsed) !=", "def vw2uci(vw_file_name, docword_file_name, vocab_file_name): vocab = dict() temp_docword_file_name = os.path.join( os.path.dirname(vw_file_name), \"temp.txt\") temp_docword_file", "UciReader(docword_file_name, vocab_file_name, logger=logger) uci.save_vw(vw_file_name) def vw2uci(vw_file_name, docword_file_name, vocab_file_name): vocab = dict() temp_docword_file_name =", "!= 3: continue doc_id = int(parsed[0]) if doc_id != self.cur_doc_id: self.write_doc() self.cur_doc_id =", "int(parsed[0]) if doc_id != self.cur_doc_id: self.write_doc() self.cur_doc_id = doc_id if self.cur_doc_id % 1000", "key, value in sorted(bow.items()): temp_docword_file.write(\"%d %d %d\\n\" % (docs_counter, key, value)) entries_counter +=", "value)) entries_counter += 1 temp_docword_file.close() vocab_file.close() with open(docword_file_name, \"w\") as f: f.write( \"%d\\n%d\\n%d\\n\"", "current_modality = \"@default_class\" bow = dict() for token in tokens[1:]: if token[0] ==", "1 self.docword_file = docword_file self.log(\"UCI read OK\") def write_doc(self): self.out.write(\"%06d.txt \" % self.cur_doc_id)", "open(temp_docword_file_name, \"w\") vocab_file = open(vocab_file_name, \"w\", encoding='utf-8') docs_counter = 0 terms_counter = 0", "0 terms_counter = 0 entries_counter = 0 for line in open(vw_file_name, encoding='utf-8'): docs_counter", "tokens[1:]: if token[0] == '|': current_modality = token[1:] else: parsed = token.split(':') try:", "logger=logger) uci.save_vw(vw_file_name) def vw2uci(vw_file_name, docword_file_name, vocab_file_name): vocab = dict() temp_docword_file_name = os.path.join( os.path.dirname(vw_file_name),", "+ current_modality try: wid = vocab[word] except BaseException: vocab_file.write(word + \"\\n\") terms_counter +=", "= int(parsed[0]) if doc_id != self.cur_doc_id: self.write_doc() self.cur_doc_id = doc_id if self.cur_doc_id %", "\"%d\\n%d\\n%d\\n\" % (docs_counter, terms_counter, entries_counter)) for line in open(temp_docword_file_name): f.write(line) if __name__ ==", "entries_counter = 0 for line in open(vw_file_name, encoding='utf-8'): docs_counter += 1 tokens =", "= parsed[2] write = word if ':' in word: print(\"Warning! Colon found! Term", "f: i = 1 for line in f: line = line.replace('\\n', '') parsed", "as f: i = 1 for line in f: line = line.replace('\\n', '')", "self.cur_doc_id % 1000 == 0: self.log(str(self.cur_doc_id)) word, modality = self.vocab[int(parsed[1])] count = parsed[2]", "word: print(\"Warning! Colon found! 
Term ignored.\") continue if count != \"1\": write +=", "self.out.write(\"\\n\") # if self.cur_doc_id % 100 == 0: # print(self.cur_doc_id) self.bow = {}", "terms_counter wid = terms_counter try: bow[wid] += cnt except BaseException: bow[wid] = cnt", "except BaseException: self.bow[modality] = write + ' ' self.write_doc() self.out.close() def log(self, s):", "self.bow[modality] += write + ' ' except BaseException: self.bow[modality] = write + '", "def __init__(self, docword_file, vocab_file, logger=None): self.logger = logger self.log(\"Start UciReader...\") self.vocab = dict()", "= (line, \"@default_class\") i += 1 self.docword_file = docword_file self.log(\"UCI read OK\") def", "= open(temp_docword_file_name, \"w\") vocab_file = open(vocab_file_name, \"w\", encoding='utf-8') docs_counter = 0 terms_counter =", "self.out = open(output_file, \"w\", encoding='utf-8') self.cur_doc_id = 1 self.bow = dict() with open(self.docword_file,", "BaseException: self.bow[modality] = write + ' ' self.write_doc() self.out.close() def log(self, s): if", "%s\" % (modality, string)) self.out.write(\"\\n\") # if self.cur_doc_id % 100 == 0: #", "vw_file_name, logger=None): uci = UciReader(docword_file_name, vocab_file_name, logger=logger) uci.save_vw(vw_file_name) def vw2uci(vw_file_name, docword_file_name, vocab_file_name): vocab", "line in f: parsed = line.split() if len(parsed) != 3: continue doc_id =", "= doc_id if self.cur_doc_id % 1000 == 0: self.log(str(self.cur_doc_id)) word, modality = self.vocab[int(parsed[1])]", "vocab[word] except BaseException: vocab_file.write(word + \"\\n\") terms_counter += 1 vocab[word] = terms_counter wid", "' except BaseException: self.bow[modality] = write + ' ' self.write_doc() self.out.close() def log(self,", "continue doc_id = int(parsed[0]) if doc_id != self.cur_doc_id: self.write_doc() self.cur_doc_id = doc_id if", "self.cur_doc_id = 1 self.bow = dict() with open(self.docword_file, \"r\", encoding=\"utf-8\") as f: for", "def log(self, s): if self.logger: self.logger.log(s) def uci2vw(docword_file_name, vocab_file_name, vw_file_name, logger=None): uci =", "+ \"\\n\") terms_counter += 1 vocab[word] = terms_counter wid = terms_counter try: bow[wid]", "token in tokens[1:]: if token[0] == '|': current_modality = token[1:] else: parsed =", "for modality, string in self.bow.items(): self.out.write(\"|%s %s\" % (modality, string)) self.out.write(\"\\n\") # if", "save_vw(self, output_file): self.out = open(output_file, \"w\", encoding='utf-8') self.cur_doc_id = 1 self.bow = dict()", "key, value)) entries_counter += 1 temp_docword_file.close() vocab_file.close() with open(docword_file_name, \"w\") as f: f.write(", "for token in tokens[1:]: if token[0] == '|': current_modality = token[1:] else: parsed", "'') parsed = line.split() if len(parsed) == 2: self.vocab[i] = parsed else: self.vocab[i]", "write_doc(self): self.out.write(\"%06d.txt \" % self.cur_doc_id) for modality, string in self.bow.items(): self.out.write(\"|%s %s\" %", "open(self.docword_file, \"r\", encoding=\"utf-8\") as f: for line in f: parsed = line.split() if", "in f: parsed = line.split() if len(parsed) != 3: continue doc_id = int(parsed[0])", "found! 
Term ignored.\") continue if count != \"1\": write += ':' + count", "write + ' ' except BaseException: self.bow[modality] = write + ' ' self.write_doc()", "f.write(line) if __name__ == \"__main__\": docword_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\" vocab_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\vocab.lenta.txt\" output_file =", "+= ':' + count try: self.bow[modality] += write + ' ' except BaseException:", "vw2uci(vw_file_name, docword_file_name, vocab_file_name): vocab = dict() temp_docword_file_name = os.path.join( os.path.dirname(vw_file_name), \"temp.txt\") temp_docword_file =", "self.cur_doc_id % 100 == 0: # print(self.cur_doc_id) self.bow = {} def save_vw(self, output_file):", "wid = vocab[word] except BaseException: vocab_file.write(word + \"\\n\") terms_counter += 1 vocab[word] =", "value in sorted(bow.items()): temp_docword_file.write(\"%d %d %d\\n\" % (docs_counter, key, value)) entries_counter += 1", "line.split() if len(parsed) == 2: self.vocab[i] = parsed else: self.vocab[i] = (line, \"@default_class\")", "write = word if ':' in word: print(\"Warning! Colon found! Term ignored.\") continue", "logger self.log(\"Start UciReader...\") self.vocab = dict() with open(vocab_file, \"r\", encoding=\"utf-8\") as f: i", "temp_docword_file_name = os.path.join( os.path.dirname(vw_file_name), \"temp.txt\") temp_docword_file = open(temp_docword_file_name, \"w\") vocab_file = open(vocab_file_name, \"w\",", "in open(vw_file_name, encoding='utf-8'): docs_counter += 1 tokens = line.split() current_modality = \"@default_class\" bow", "self.out.close() def log(self, s): if self.logger: self.logger.log(s) def uci2vw(docword_file_name, vocab_file_name, vw_file_name, logger=None): uci", "vocab_file = open(vocab_file_name, \"w\", encoding='utf-8') docs_counter = 0 terms_counter = 0 entries_counter =", "(docs_counter, terms_counter, entries_counter)) for line in open(temp_docword_file_name): f.write(line) if __name__ == \"__main__\": docword_file", "= terms_counter try: bow[wid] += cnt except BaseException: bow[wid] = cnt for key,", "vocab_file.write(word + \"\\n\") terms_counter += 1 vocab[word] = terms_counter wid = terms_counter try:", "write += ':' + count try: self.bow[modality] += write + ' ' except", "vocab = dict() temp_docword_file_name = os.path.join( os.path.dirname(vw_file_name), \"temp.txt\") temp_docword_file = open(temp_docword_file_name, \"w\") vocab_file", "open(docword_file_name, \"w\") as f: f.write( \"%d\\n%d\\n%d\\n\" % (docs_counter, terms_counter, entries_counter)) for line in", "== 2: self.vocab[i] = parsed else: self.vocab[i] = (line, \"@default_class\") i += 1", "\"r\", encoding=\"utf-8\") as f: i = 1 for line in f: line =", "= 1 word = parsed[0] + \" \" + current_modality try: wid =", "parsed = line.split() if len(parsed) == 2: self.vocab[i] = parsed else: self.vocab[i] =", "UciReader: def __init__(self, docword_file, vocab_file, logger=None): self.logger = logger self.log(\"Start UciReader...\") self.vocab =", "def write_doc(self): self.out.write(\"%06d.txt \" % self.cur_doc_id) for modality, string in self.bow.items(): self.out.write(\"|%s %s\"", "self.bow.items(): self.out.write(\"|%s %s\" % (modality, string)) self.out.write(\"\\n\") # if self.cur_doc_id % 100 ==", "import os class UciReader: def __init__(self, docword_file, vocab_file, logger=None): self.logger = logger self.log(\"Start", "dict() for token in tokens[1:]: if token[0] == '|': current_modality = token[1:] else:", "+ ' ' except BaseException: 
self.bow[modality] = write + ' ' self.write_doc() self.out.close()", "docword_file, vocab_file, logger=None): self.logger = logger self.log(\"Start UciReader...\") self.vocab = dict() with open(vocab_file,", "open(vocab_file_name, \"w\", encoding='utf-8') docs_counter = 0 terms_counter = 0 entries_counter = 0 for", "encoding=\"utf-8\") as f: i = 1 for line in f: line = line.replace('\\n',", "Term ignored.\") continue if count != \"1\": write += ':' + count try:", "1 for line in f: line = line.replace('\\n', '') parsed = line.split() if", "token[0] == '|': current_modality = token[1:] else: parsed = token.split(':') try: cnt =", "continue if count != \"1\": write += ':' + count try: self.bow[modality] +=", "os class UciReader: def __init__(self, docword_file, vocab_file, logger=None): self.logger = logger self.log(\"Start UciReader...\")", "1 self.bow = dict() with open(self.docword_file, \"r\", encoding=\"utf-8\") as f: for line in", "1 tokens = line.split() current_modality = \"@default_class\" bow = dict() for token in", "if self.logger: self.logger.log(s) def uci2vw(docword_file_name, vocab_file_name, vw_file_name, logger=None): uci = UciReader(docword_file_name, vocab_file_name, logger=logger)", "= UciReader(docword_file_name, vocab_file_name, logger=logger) uci.save_vw(vw_file_name) def vw2uci(vw_file_name, docword_file_name, vocab_file_name): vocab = dict() temp_docword_file_name", "self.log(\"UCI read OK\") def write_doc(self): self.out.write(\"%06d.txt \" % self.cur_doc_id) for modality, string in", "if doc_id != self.cur_doc_id: self.write_doc() self.cur_doc_id = doc_id if self.cur_doc_id % 1000 ==", "OK\") def write_doc(self): self.out.write(\"%06d.txt \" % self.cur_doc_id) for modality, string in self.bow.items(): self.out.write(\"|%s", "line in open(temp_docword_file_name): f.write(line) if __name__ == \"__main__\": docword_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\" vocab_file =", "self.out.write(\"|%s %s\" % (modality, string)) self.out.write(\"\\n\") # if self.cur_doc_id % 100 == 0:", "= \"@default_class\" bow = dict() for token in tokens[1:]: if token[0] == '|':", "parsed = token.split(':') try: cnt = int(parsed[1]) except BaseException: cnt = 1 word", "\"r\", encoding=\"utf-8\") as f: for line in f: parsed = line.split() if len(parsed)", "line = line.replace('\\n', '') parsed = line.split() if len(parsed) == 2: self.vocab[i] =", "open(output_file, \"w\", encoding='utf-8') self.cur_doc_id = 1 self.bow = dict() with open(self.docword_file, \"r\", encoding=\"utf-8\")", "word, modality = self.vocab[int(parsed[1])] count = parsed[2] write = word if ':' in", "in self.bow.items(): self.out.write(\"|%s %s\" % (modality, string)) self.out.write(\"\\n\") # if self.cur_doc_id % 100", "open(vw_file_name, encoding='utf-8'): docs_counter += 1 tokens = line.split() current_modality = \"@default_class\" bow =", "= parsed else: self.vocab[i] = (line, \"@default_class\") i += 1 self.docword_file = docword_file", "\" \" + current_modality try: wid = vocab[word] except BaseException: vocab_file.write(word + \"\\n\")", "= open(vocab_file_name, \"w\", encoding='utf-8') docs_counter = 0 terms_counter = 0 entries_counter = 0", "BaseException: cnt = 1 word = parsed[0] + \" \" + current_modality try:", "output_file): self.out = open(output_file, \"w\", encoding='utf-8') self.cur_doc_id = 1 self.bow = dict() with", "parsed else: self.vocab[i] = (line, \"@default_class\") i += 1 self.docword_file = docword_file self.log(\"UCI", "in sorted(bow.items()): 
temp_docword_file.write(\"%d %d %d\\n\" % (docs_counter, key, value)) entries_counter += 1 temp_docword_file.close()", "f: f.write( \"%d\\n%d\\n%d\\n\" % (docs_counter, terms_counter, entries_counter)) for line in open(temp_docword_file_name): f.write(line) if", "vocab_file.close() with open(docword_file_name, \"w\") as f: f.write( \"%d\\n%d\\n%d\\n\" % (docs_counter, terms_counter, entries_counter)) for", "% 100 == 0: # print(self.cur_doc_id) self.bow = {} def save_vw(self, output_file): self.out", "!= self.cur_doc_id: self.write_doc() self.cur_doc_id = doc_id if self.cur_doc_id % 1000 == 0: self.log(str(self.cur_doc_id))", "'|': current_modality = token[1:] else: parsed = token.split(':') try: cnt = int(parsed[1]) except", "% (modality, string)) self.out.write(\"\\n\") # if self.cur_doc_id % 100 == 0: # print(self.cur_doc_id)", "parsed[0] + \" \" + current_modality try: wid = vocab[word] except BaseException: vocab_file.write(word", "\" + current_modality try: wid = vocab[word] except BaseException: vocab_file.write(word + \"\\n\") terms_counter", "(docs_counter, key, value)) entries_counter += 1 temp_docword_file.close() vocab_file.close() with open(docword_file_name, \"w\") as f:", "f: for line in f: parsed = line.split() if len(parsed) != 3: continue", "\" % self.cur_doc_id) for modality, string in self.bow.items(): self.out.write(\"|%s %s\" % (modality, string))", "int(parsed[1]) except BaseException: cnt = 1 word = parsed[0] + \" \" +", "= dict() with open(vocab_file, \"r\", encoding=\"utf-8\") as f: i = 1 for line", "f.write( \"%d\\n%d\\n%d\\n\" % (docs_counter, terms_counter, entries_counter)) for line in open(temp_docword_file_name): f.write(line) if __name__", "docword_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\" vocab_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\vocab.lenta.txt\" output_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\vw.txt\" uci = UciReader(docword_file, vocab_file)", "1000 == 0: self.log(str(self.cur_doc_id)) word, modality = self.vocab[int(parsed[1])] count = parsed[2] write =", "parsed = line.split() if len(parsed) != 3: continue doc_id = int(parsed[0]) if doc_id", "+ ' ' self.write_doc() self.out.close() def log(self, s): if self.logger: self.logger.log(s) def uci2vw(docword_file_name,", "current_modality = token[1:] else: parsed = token.split(':') try: cnt = int(parsed[1]) except BaseException:", "+= 1 self.docword_file = docword_file self.log(\"UCI read OK\") def write_doc(self): self.out.write(\"%06d.txt \" %", "1 temp_docword_file.close() vocab_file.close() with open(docword_file_name, \"w\") as f: f.write( \"%d\\n%d\\n%d\\n\" % (docs_counter, terms_counter,", "= docword_file self.log(\"UCI read OK\") def write_doc(self): self.out.write(\"%06d.txt \" % self.cur_doc_id) for modality,", "+= 1 temp_docword_file.close() vocab_file.close() with open(docword_file_name, \"w\") as f: f.write( \"%d\\n%d\\n%d\\n\" % (docs_counter,", "% 1000 == 0: self.log(str(self.cur_doc_id)) word, modality = self.vocab[int(parsed[1])] count = parsed[2] write", "UciReader...\") self.vocab = dict() with open(vocab_file, \"r\", encoding=\"utf-8\") as f: i = 1", "else: parsed = token.split(':') try: cnt = int(parsed[1]) except BaseException: cnt = 1", "for line in open(temp_docword_file_name): f.write(line) if __name__ == \"__main__\": docword_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\" vocab_file", "= logger self.log(\"Start UciReader...\") self.vocab = dict() with open(vocab_file, 
\"r\", encoding=\"utf-8\") as f:", "print(\"Warning! Colon found! Term ignored.\") continue if count != \"1\": write += ':'", "vocab_file, logger=None): self.logger = logger self.log(\"Start UciReader...\") self.vocab = dict() with open(vocab_file, \"r\",", "else: self.vocab[i] = (line, \"@default_class\") i += 1 self.docword_file = docword_file self.log(\"UCI read", "class UciReader: def __init__(self, docword_file, vocab_file, logger=None): self.logger = logger self.log(\"Start UciReader...\") self.vocab", "== '|': current_modality = token[1:] else: parsed = token.split(':') try: cnt = int(parsed[1])", "= 1 for line in f: line = line.replace('\\n', '') parsed = line.split()", "100 == 0: # print(self.cur_doc_id) self.bow = {} def save_vw(self, output_file): self.out =", "self.write_doc() self.out.close() def log(self, s): if self.logger: self.logger.log(s) def uci2vw(docword_file_name, vocab_file_name, vw_file_name, logger=None):", "line in f: line = line.replace('\\n', '') parsed = line.split() if len(parsed) ==", "== 0: # print(self.cur_doc_id) self.bow = {} def save_vw(self, output_file): self.out = open(output_file,", "self.vocab[i] = parsed else: self.vocab[i] = (line, \"@default_class\") i += 1 self.docword_file =", "self.cur_doc_id) for modality, string in self.bow.items(): self.out.write(\"|%s %s\" % (modality, string)) self.out.write(\"\\n\") #", "logger=None): self.logger = logger self.log(\"Start UciReader...\") self.vocab = dict() with open(vocab_file, \"r\", encoding=\"utf-8\")", "modality, string in self.bow.items(): self.out.write(\"|%s %s\" % (modality, string)) self.out.write(\"\\n\") # if self.cur_doc_id", "if __name__ == \"__main__\": docword_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\" vocab_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\vocab.lenta.txt\" output_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\vw.txt\"", "= terms_counter wid = terms_counter try: bow[wid] += cnt except BaseException: bow[wid] =", "= 0 for line in open(vw_file_name, encoding='utf-8'): docs_counter += 1 tokens = line.split()", "+= cnt except BaseException: bow[wid] = cnt for key, value in sorted(bow.items()): temp_docword_file.write(\"%d", "0: # print(self.cur_doc_id) self.bow = {} def save_vw(self, output_file): self.out = open(output_file, \"w\",", "\"1\": write += ':' + count try: self.bow[modality] += write + ' '", "= line.split() if len(parsed) == 2: self.vocab[i] = parsed else: self.vocab[i] = (line,", "(line, \"@default_class\") i += 1 self.docword_file = docword_file self.log(\"UCI read OK\") def write_doc(self):", "= token.split(':') try: cnt = int(parsed[1]) except BaseException: cnt = 1 word =", "= {} def save_vw(self, output_file): self.out = open(output_file, \"w\", encoding='utf-8') self.cur_doc_id = 1", "with open(docword_file_name, \"w\") as f: f.write( \"%d\\n%d\\n%d\\n\" % (docs_counter, terms_counter, entries_counter)) for line", "as f: for line in f: parsed = line.split() if len(parsed) != 3:", "' ' self.write_doc() self.out.close() def log(self, s): if self.logger: self.logger.log(s) def uci2vw(docword_file_name, vocab_file_name,", "dict() with open(self.docword_file, \"r\", encoding=\"utf-8\") as f: for line in f: parsed =", "dict() with open(vocab_file, \"r\", encoding=\"utf-8\") as f: i = 1 for line in", "= 0 terms_counter = 0 entries_counter = 0 for line in open(vw_file_name, encoding='utf-8'):", "for line in f: line = line.replace('\\n', '') parsed = line.split() if len(parsed)", "try: self.bow[modality] 
+= write + ' ' except BaseException: self.bow[modality] = write +", "line.split() if len(parsed) != 3: continue doc_id = int(parsed[0]) if doc_id != self.cur_doc_id:", "log(self, s): if self.logger: self.logger.log(s) def uci2vw(docword_file_name, vocab_file_name, vw_file_name, logger=None): uci = UciReader(docword_file_name,", "docword_file_name, vocab_file_name): vocab = dict() temp_docword_file_name = os.path.join( os.path.dirname(vw_file_name), \"temp.txt\") temp_docword_file = open(temp_docword_file_name,", "% (docs_counter, terms_counter, entries_counter)) for line in open(temp_docword_file_name): f.write(line) if __name__ == \"__main__\":", "bow = dict() for token in tokens[1:]: if token[0] == '|': current_modality =", "self.bow = dict() with open(self.docword_file, \"r\", encoding=\"utf-8\") as f: for line in f:", "\"temp.txt\") temp_docword_file = open(temp_docword_file_name, \"w\") vocab_file = open(vocab_file_name, \"w\", encoding='utf-8') docs_counter = 0", "count try: self.bow[modality] += write + ' ' except BaseException: self.bow[modality] = write", "len(parsed) == 2: self.vocab[i] = parsed else: self.vocab[i] = (line, \"@default_class\") i +=", "= word if ':' in word: print(\"Warning! Colon found! Term ignored.\") continue if", "self.log(str(self.cur_doc_id)) word, modality = self.vocab[int(parsed[1])] count = parsed[2] write = word if ':'", "token.split(':') try: cnt = int(parsed[1]) except BaseException: cnt = 1 word = parsed[0]", "with open(self.docword_file, \"r\", encoding=\"utf-8\") as f: for line in f: parsed = line.split()", "\"w\") vocab_file = open(vocab_file_name, \"w\", encoding='utf-8') docs_counter = 0 terms_counter = 0 entries_counter", "bow[wid] = cnt for key, value in sorted(bow.items()): temp_docword_file.write(\"%d %d %d\\n\" % (docs_counter,", "self.out.write(\"%06d.txt \" % self.cur_doc_id) for modality, string in self.bow.items(): self.out.write(\"|%s %s\" % (modality,", "current_modality try: wid = vocab[word] except BaseException: vocab_file.write(word + \"\\n\") terms_counter += 1", "terms_counter, entries_counter)) for line in open(temp_docword_file_name): f.write(line) if __name__ == \"__main__\": docword_file =", "doc_id if self.cur_doc_id % 1000 == 0: self.log(str(self.cur_doc_id)) word, modality = self.vocab[int(parsed[1])] count", "= line.split() current_modality = \"@default_class\" bow = dict() for token in tokens[1:]: if", "Colon found! Term ignored.\") continue if count != \"1\": write += ':' +", "' ' except BaseException: self.bow[modality] = write + ' ' self.write_doc() self.out.close() def", "in open(temp_docword_file_name): f.write(line) if __name__ == \"__main__\": docword_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\" vocab_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\vocab.lenta.txt\"", "terms_counter try: bow[wid] += cnt except BaseException: bow[wid] = cnt for key, value", "!= \"1\": write += ':' + count try: self.bow[modality] += write + '", "\"__main__\": docword_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\" vocab_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\vocab.lenta.txt\" output_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\vw.txt\" uci = UciReader(docword_file,", "for line in f: parsed = line.split() if len(parsed) != 3: continue doc_id", "self.vocab[int(parsed[1])] count = parsed[2] write = word if ':' in word: print(\"Warning! 
Colon", "if len(parsed) == 2: self.vocab[i] = parsed else: self.vocab[i] = (line, \"@default_class\") i", "<filename>algo/tools/converters.py import os class UciReader: def __init__(self, docword_file, vocab_file, logger=None): self.logger = logger", "self.logger.log(s) def uci2vw(docword_file_name, vocab_file_name, vw_file_name, logger=None): uci = UciReader(docword_file_name, vocab_file_name, logger=logger) uci.save_vw(vw_file_name) def", "== \"__main__\": docword_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\" vocab_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\vocab.lenta.txt\" output_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\vw.txt\" uci =", "entries_counter += 1 temp_docword_file.close() vocab_file.close() with open(docword_file_name, \"w\") as f: f.write( \"%d\\n%d\\n%d\\n\" %", "+= 1 tokens = line.split() current_modality = \"@default_class\" bow = dict() for token", "0: self.log(str(self.cur_doc_id)) word, modality = self.vocab[int(parsed[1])] count = parsed[2] write = word if", "' self.write_doc() self.out.close() def log(self, s): if self.logger: self.logger.log(s) def uci2vw(docword_file_name, vocab_file_name, vw_file_name,", "__name__ == \"__main__\": docword_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\" vocab_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\vocab.lenta.txt\" output_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\vw.txt\" uci", "token[1:] else: parsed = token.split(':') try: cnt = int(parsed[1]) except BaseException: cnt =", "self.cur_doc_id: self.write_doc() self.cur_doc_id = doc_id if self.cur_doc_id % 1000 == 0: self.log(str(self.cur_doc_id)) word,", "modality = self.vocab[int(parsed[1])] count = parsed[2] write = word if ':' in word:", "0 for line in open(vw_file_name, encoding='utf-8'): docs_counter += 1 tokens = line.split() current_modality", "read OK\") def write_doc(self): self.out.write(\"%06d.txt \" % self.cur_doc_id) for modality, string in self.bow.items():", "temp_docword_file = open(temp_docword_file_name, \"w\") vocab_file = open(vocab_file_name, \"w\", encoding='utf-8') docs_counter = 0 terms_counter", "terms_counter = 0 entries_counter = 0 for line in open(vw_file_name, encoding='utf-8'): docs_counter +=", "= dict() temp_docword_file_name = os.path.join( os.path.dirname(vw_file_name), \"temp.txt\") temp_docword_file = open(temp_docword_file_name, \"w\") vocab_file =", "print(self.cur_doc_id) self.bow = {} def save_vw(self, output_file): self.out = open(output_file, \"w\", encoding='utf-8') self.cur_doc_id", "= cnt for key, value in sorted(bow.items()): temp_docword_file.write(\"%d %d %d\\n\" % (docs_counter, key,", "1 vocab[word] = terms_counter wid = terms_counter try: bow[wid] += cnt except BaseException:", "\"w\", encoding='utf-8') docs_counter = 0 terms_counter = 0 entries_counter = 0 for line", "self.docword_file = docword_file self.log(\"UCI read OK\") def write_doc(self): self.out.write(\"%06d.txt \" % self.cur_doc_id) for", "':' + count try: self.bow[modality] += write + ' ' except BaseException: self.bow[modality]", "cnt = 1 word = parsed[0] + \" \" + current_modality try: wid", "BaseException: bow[wid] = cnt for key, value in sorted(bow.items()): temp_docword_file.write(\"%d %d %d\\n\" %", "self.logger: self.logger.log(s) def uci2vw(docword_file_name, vocab_file_name, vw_file_name, logger=None): uci = UciReader(docword_file_name, vocab_file_name, logger=logger) uci.save_vw(vw_file_name)", "len(parsed) != 3: continue 
doc_id = int(parsed[0]) if doc_id != self.cur_doc_id: self.write_doc() self.cur_doc_id", "= dict() for token in tokens[1:]: if token[0] == '|': current_modality = token[1:]", "docs_counter += 1 tokens = line.split() current_modality = \"@default_class\" bow = dict() for", "__init__(self, docword_file, vocab_file, logger=None): self.logger = logger self.log(\"Start UciReader...\") self.vocab = dict() with", "dict() temp_docword_file_name = os.path.join( os.path.dirname(vw_file_name), \"temp.txt\") temp_docword_file = open(temp_docword_file_name, \"w\") vocab_file = open(vocab_file_name,", "= line.replace('\\n', '') parsed = line.split() if len(parsed) == 2: self.vocab[i] = parsed", "encoding='utf-8') docs_counter = 0 terms_counter = 0 entries_counter = 0 for line in", "\"@default_class\" bow = dict() for token in tokens[1:]: if token[0] == '|': current_modality", "if token[0] == '|': current_modality = token[1:] else: parsed = token.split(':') try: cnt", "except BaseException: bow[wid] = cnt for key, value in sorted(bow.items()): temp_docword_file.write(\"%d %d %d\\n\"", "self.write_doc() self.cur_doc_id = doc_id if self.cur_doc_id % 1000 == 0: self.log(str(self.cur_doc_id)) word, modality", "def uci2vw(docword_file_name, vocab_file_name, vw_file_name, logger=None): uci = UciReader(docword_file_name, vocab_file_name, logger=logger) uci.save_vw(vw_file_name) def vw2uci(vw_file_name,", "self.vocab = dict() with open(vocab_file, \"r\", encoding=\"utf-8\") as f: i = 1 for", "def save_vw(self, output_file): self.out = open(output_file, \"w\", encoding='utf-8') self.cur_doc_id = 1 self.bow =", "temp_docword_file.close() vocab_file.close() with open(docword_file_name, \"w\") as f: f.write( \"%d\\n%d\\n%d\\n\" % (docs_counter, terms_counter, entries_counter))", "= self.vocab[int(parsed[1])] count = parsed[2] write = word if ':' in word: print(\"Warning!", "# print(self.cur_doc_id) self.bow = {} def save_vw(self, output_file): self.out = open(output_file, \"w\", encoding='utf-8')", "% self.cur_doc_id) for modality, string in self.bow.items(): self.out.write(\"|%s %s\" % (modality, string)) self.out.write(\"\\n\")", "string in self.bow.items(): self.out.write(\"|%s %s\" % (modality, string)) self.out.write(\"\\n\") # if self.cur_doc_id %", "doc_id = int(parsed[0]) if doc_id != self.cur_doc_id: self.write_doc() self.cur_doc_id = doc_id if self.cur_doc_id", "in f: line = line.replace('\\n', '') parsed = line.split() if len(parsed) == 2:", "self.log(\"Start UciReader...\") self.vocab = dict() with open(vocab_file, \"r\", encoding=\"utf-8\") as f: i =", "\"@default_class\") i += 1 self.docword_file = docword_file self.log(\"UCI read OK\") def write_doc(self): self.out.write(\"%06d.txt", "i = 1 for line in f: line = line.replace('\\n', '') parsed =", "cnt = int(parsed[1]) except BaseException: cnt = 1 word = parsed[0] + \"", "= dict() with open(self.docword_file, \"r\", encoding=\"utf-8\") as f: for line in f: parsed", "= open(output_file, \"w\", encoding='utf-8') self.cur_doc_id = 1 self.bow = dict() with open(self.docword_file, \"r\",", "doc_id != self.cur_doc_id: self.write_doc() self.cur_doc_id = doc_id if self.cur_doc_id % 1000 == 0:", "== 0: self.log(str(self.cur_doc_id)) word, modality = self.vocab[int(parsed[1])] count = parsed[2] write = word", "s): if self.logger: self.logger.log(s) def uci2vw(docword_file_name, vocab_file_name, vw_file_name, logger=None): uci = UciReader(docword_file_name, vocab_file_name,", "for line in open(vw_file_name, encoding='utf-8'): docs_counter += 1 tokens = 
line.split() current_modality =", "entries_counter)) for line in open(temp_docword_file_name): f.write(line) if __name__ == \"__main__\": docword_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\"", "= line.split() if len(parsed) != 3: continue doc_id = int(parsed[0]) if doc_id !=", "= \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\" vocab_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\vocab.lenta.txt\" output_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\vw.txt\" uci = UciReader(docword_file, vocab_file) uci.save_vw(output_file)", "count != \"1\": write += ':' + count try: self.bow[modality] += write +", "i += 1 self.docword_file = docword_file self.log(\"UCI read OK\") def write_doc(self): self.out.write(\"%06d.txt \"", "= 1 self.bow = dict() with open(self.docword_file, \"r\", encoding=\"utf-8\") as f: for line", "':' in word: print(\"Warning! Colon found! Term ignored.\") continue if count != \"1\":", "parsed[2] write = word if ':' in word: print(\"Warning! Colon found! Term ignored.\")", "temp_docword_file.write(\"%d %d %d\\n\" % (docs_counter, key, value)) entries_counter += 1 temp_docword_file.close() vocab_file.close() with", "self.cur_doc_id = doc_id if self.cur_doc_id % 1000 == 0: self.log(str(self.cur_doc_id)) word, modality =", "terms_counter += 1 vocab[word] = terms_counter wid = terms_counter try: bow[wid] += cnt", "%d\\n\" % (docs_counter, key, value)) entries_counter += 1 temp_docword_file.close() vocab_file.close() with open(docword_file_name, \"w\")", "= write + ' ' self.write_doc() self.out.close() def log(self, s): if self.logger: self.logger.log(s)", "docword_file self.log(\"UCI read OK\") def write_doc(self): self.out.write(\"%06d.txt \" % self.cur_doc_id) for modality, string", "line.split() current_modality = \"@default_class\" bow = dict() for token in tokens[1:]: if token[0]", "os.path.dirname(vw_file_name), \"temp.txt\") temp_docword_file = open(temp_docword_file_name, \"w\") vocab_file = open(vocab_file_name, \"w\", encoding='utf-8') docs_counter =", "BaseException: vocab_file.write(word + \"\\n\") terms_counter += 1 vocab[word] = terms_counter wid = terms_counter", "as f: f.write( \"%d\\n%d\\n%d\\n\" % (docs_counter, terms_counter, entries_counter)) for line in open(temp_docword_file_name): f.write(line)", "for key, value in sorted(bow.items()): temp_docword_file.write(\"%d %d %d\\n\" % (docs_counter, key, value)) entries_counter", "2: self.vocab[i] = parsed else: self.vocab[i] = (line, \"@default_class\") i += 1 self.docword_file", "= parsed[0] + \" \" + current_modality try: wid = vocab[word] except BaseException:", "1 word = parsed[0] + \" \" + current_modality try: wid = vocab[word]", "= os.path.join( os.path.dirname(vw_file_name), \"temp.txt\") temp_docword_file = open(temp_docword_file_name, \"w\") vocab_file = open(vocab_file_name, \"w\", encoding='utf-8')", "+= 1 vocab[word] = terms_counter wid = terms_counter try: bow[wid] += cnt except", "\"w\") as f: f.write( \"%d\\n%d\\n%d\\n\" % (docs_counter, terms_counter, entries_counter)) for line in open(temp_docword_file_name):", "encoding='utf-8') self.cur_doc_id = 1 self.bow = dict() with open(self.docword_file, \"r\", encoding=\"utf-8\") as f:", "open(temp_docword_file_name): f.write(line) if __name__ == \"__main__\": docword_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\docword.lenta.txt\" vocab_file = \"D:\\\\visartm\\\\data\\\\datasets\\\\lenta\\\\UCI\\\\vocab.lenta.txt\" output_file", "encoding='utf-8'): 
docs_counter += 1 tokens = line.split() current_modality = \"@default_class\" bow = dict()", "ignored.\") continue if count != \"1\": write += ':' + count try: self.bow[modality]", "if ':' in word: print(\"Warning! Colon found! Term ignored.\") continue if count !=", "bow[wid] += cnt except BaseException: bow[wid] = cnt for key, value in sorted(bow.items()):", "word if ':' in word: print(\"Warning! Colon found! Term ignored.\") continue if count", "\"w\", encoding='utf-8') self.cur_doc_id = 1 self.bow = dict() with open(self.docword_file, \"r\", encoding=\"utf-8\") as", "= vocab[word] except BaseException: vocab_file.write(word + \"\\n\") terms_counter += 1 vocab[word] = terms_counter", "# if self.cur_doc_id % 100 == 0: # print(self.cur_doc_id) self.bow = {} def", "try: bow[wid] += cnt except BaseException: bow[wid] = cnt for key, value in", "sorted(bow.items()): temp_docword_file.write(\"%d %d %d\\n\" % (docs_counter, key, value)) entries_counter += 1 temp_docword_file.close() vocab_file.close()", "count = parsed[2] write = word if ':' in word: print(\"Warning! Colon found!", "self.bow[modality] = write + ' ' self.write_doc() self.out.close() def log(self, s): if self.logger:", "if count != \"1\": write += ':' + count try: self.bow[modality] += write", "except BaseException: vocab_file.write(word + \"\\n\") terms_counter += 1 vocab[word] = terms_counter wid =", "try: cnt = int(parsed[1]) except BaseException: cnt = 1 word = parsed[0] +", "+= write + ' ' except BaseException: self.bow[modality] = write + ' '", "if len(parsed) != 3: continue doc_id = int(parsed[0]) if doc_id != self.cur_doc_id: self.write_doc()", "os.path.join( os.path.dirname(vw_file_name), \"temp.txt\") temp_docword_file = open(temp_docword_file_name, \"w\") vocab_file = open(vocab_file_name, \"w\", encoding='utf-8') docs_counter", "uci = UciReader(docword_file_name, vocab_file_name, logger=logger) uci.save_vw(vw_file_name) def vw2uci(vw_file_name, docword_file_name, vocab_file_name): vocab = dict()", "self.vocab[i] = (line, \"@default_class\") i += 1 self.docword_file = docword_file self.log(\"UCI read OK\")", "3: continue doc_id = int(parsed[0]) if doc_id != self.cur_doc_id: self.write_doc() self.cur_doc_id = doc_id", "vocab_file_name, logger=logger) uci.save_vw(vw_file_name) def vw2uci(vw_file_name, docword_file_name, vocab_file_name): vocab = dict() temp_docword_file_name = os.path.join(", "uci.save_vw(vw_file_name) def vw2uci(vw_file_name, docword_file_name, vocab_file_name): vocab = dict() temp_docword_file_name = os.path.join( os.path.dirname(vw_file_name), \"temp.txt\")", "+ count try: self.bow[modality] += write + ' ' except BaseException: self.bow[modality] =", "in word: print(\"Warning! Colon found! Term ignored.\") continue if count != \"1\": write", "with open(vocab_file, \"r\", encoding=\"utf-8\") as f: i = 1 for line in f:", "word = parsed[0] + \" \" + current_modality try: wid = vocab[word] except", "wid = terms_counter try: bow[wid] += cnt except BaseException: bow[wid] = cnt for", "except BaseException: cnt = 1 word = parsed[0] + \" \" + current_modality" ]
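# ---------------------------------------------------------------------------
# Illustration (not part of the original module): a minimal, hypothetical
# example of the two representations converted above. All tokens, counts and
# variable names here are invented for demonstration purposes only.
#
# UCI side: a 1-indexed vocab file of "token modality" pairs, and a docword
# file whose first three lines are the document/term/entry counts, followed
# by "doc_id word_id count" triples (save_vw skips the three header lines
# because they do not split into exactly three fields).

vocab_txt = """apple @default_class
banana @default_class
red @colors
"""  # line number == word id (1-based)

docword_txt = """2
3
3
1 1 2
1 3 1
2 2 5
"""  # header: 2 docs, 3 terms, 3 entries

# VW side: what UciReader.save_vw would emit for the data above (up to
# modality ordering) -- one document per line, "|modality" sections, and a
# ":count" suffix only when the count differs from 1.
expected_vw = (
    "000001.txt |@default_class apple:2 |@colors red \n"
    "000002.txt |@default_class banana:5 \n"
)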
[ ": status[\"pb\"], \"vol_a\" : self.baseVol, \"vol_b\" : self.baseVol*status[\"pa\"]*para[\"beta\"]/status[\"pb\"] }) if para[\"beta\"] < 0:", "self.pairTradeStatus[pairKey][\"pb\"] = pa, pb value = self.getPairValue(pa, pb, pairPara) #发送参数信号 self.sendMessage((2, (pairKey, data[\"dateTime\"],pa,", "status[\"pb\"], \"vol_a\" : self.baseVol, \"vol_b\" : self.baseVol*status[\"pa\"]*para[\"beta\"]/status[\"pb\"] }) if para[\"beta\"] < 0: status[\"tradPoint\"][-1][\"dirc_B\"]", "\"buy\", \"dirc_B\" : \"sell\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"ratio_A\" : ratio_A,", "self.getTradeMessage(pairKey, data, value, pairPara, self.pairTradeStatus[pairKey]) #计算配对策略值 def getPairValue(self, pa, pb, para): St =", "openTrade[\"dirc_A\"] closeTrade[\"dirc_B\"] = closeTrade[\"dirc_A\"] closeTrade[\"ratio_B\"] = -1*closeTrade[\"ratio_B\"] closeTrade[\"ratio\"] = (closeTrade[\"ratio_A\"] + closeTrade[\"ratio_B\"]*np.abs(closeTrade[\"beta\"]))/(1+np.abs(closeTrade[\"beta\"])) outputFile", "value, pairPara, self.pairTradeStatus[pairKey]) #计算配对策略值 def getPairValue(self, pa, pb, para): St = np.log(pa) -", "reader = csv.reader(open(\"filtPara.csv\")) for line in reader: if line: self.pairDict[line[0]] = { \"stock_A\"", "正方向, 2 负方向, -1 不要做了 \"tradPoint\" : [] } self.sendMessage((0, self.pairDict)) def loadTrueTrade(self):", "def creatTradingLog(self, closeTrade, openTrade): if closeTrade[\"beta\"] < 0: openTrade[\"dirc_B\"] = openTrade[\"dirc_A\"] closeTrade[\"dirc_B\"] =", "if value < -para[\"stop\"]: #止损 ratio_A = (status[\"pa\"] - status[\"tradPoint\"][-1][\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"tradPoint\"][-1][\"pb\"]", "\"vol_a\" : self.baseVol, \"vol_b\" : self.baseVol*status[\"pa\"]*para[\"beta\"]/status[\"pb\"] }) if para[\"beta\"] < 0: status[\"tradPoint\"][-1][\"dirc_B\"] =", ": \"sell\", \"dirc_B\" : \"buy\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"vol_a\" :", "\"pb\" : status[\"pb\"], \"ratio_A\" : ratio_A, \"ratio_B\" : ratio_B, \"ratio\" : ratio })", "(St - para[\"mean\"])/para[\"std\"] return S #计算开平仓信号 def getTradeMessage(self, pairKey, data, value, para, status):", "0 except Exception: pass #获得股票价格 def getStockCurPrice(self, stockCode): if self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList: return copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"close\"]) return", "def customInit(self): self.name = \"pairTradeMultiple\" self.baseVol = 200 #基本开仓量 200手 self.outputFile = \".\\\\log\\\\tradPoint.csv\"", "不要做了 \"tradPoint\" : [] } self.sendMessage((0, self.pairDict)) def loadTrueTrade(self): execfile(\".\\\\log\\\\tureTrade.log\") for key, value", "strategyEntrance(self, data): for pairKey, pairPara in self.pairDict.items(): pa, pb = self.getStockCurPrice(pairPara[\"stock_A\"]), self.getStockCurPrice(pairPara[\"stock_B\"]) if", "\"sell\", \"dirc_B\" : \"buy\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"vol_a\" : self.baseVol,", "< 0: status[\"tradPoint\"][-1][\"dirc_B\"] = status[\"tradPoint\"][-1][\"dirc_A\"] self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = 1 if value <", ": \"sell\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"ratio_A\" : ratio_A, \"ratio_B\" :", "\"dateTime\" : data[\"dateTime\"], \"direction\" : 1, \"dirc_A\" : \"sell\", \"dirc_B\" : \"buy\", \"pa\"", "#基本开仓量 200手 self.outputFile = \".\\\\log\\\\tradPoint.csv\" self.pairDict = {} self.pairTradeStatus = {} self.loadPairPara() self.isFirstData", "\"a\") content = 
\"%s,%s,openTime,%s,closeTime,%s,%s,%s,%s,%s,ratio_A,%s,%s,%s,%s,%s,ratio_B,%s,all_ratio,%s\\n\"%( closeTrade[\"pairKey\"], closeTrade[\"type\"], str(openTrade[\"dateTime\"]), str(closeTrade[\"dateTime\"]), closeTrade[\"stock_A\"], openTrade[\"dirc_A\"], openTrade[\"pa\"], closeTrade[\"pa\"],closeTrade[\"ratio_A\"], closeTrade[\"stock_B\"],", "status[\"direction\"] = 0 if value < -para[\"stop\"]: #止损 ratio_A = (status[\"pa\"] - status[\"tradPoint\"][-1][\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"]", "< -para[\"stop\"]: #止损 ratio_A = (status[\"pa\"] - status[\"tradPoint\"][-1][\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"tradPoint\"][-1][\"pb\"] - status[\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"]", "1 if value < -para[\"open\"]: #负 status[\"preOpenTime\"] = copy.copy(data[\"dateTime\"]) status[\"tradPoint\"].append({ \"type\" : \"open\",", "def getStockOfferStatus(self, stockCode): if self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList: ask = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"askPrice\"]), copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"askVol\"])))[:5] bid = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"bidPrice\"]), copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"bidVol\"])))[:5]", "zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"bidPrice\"]), copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"bidVol\"])))[:5] volList = ask + bid volList = sorted(volList, key=lambda d:d[0], reverse=True)", "< 0: status[\"tradPoint\"][-1][\"dirc_B\"] = status[\"tradPoint\"][-1][\"dirc_A\"] self.creatTradingLog(status[\"tradPoint\"][-1], status[\"tradPoint\"][-2]) self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = -1 elif", "+ closeTrade[\"ratio_B\"]*np.abs(closeTrade[\"beta\"]))/(1+np.abs(closeTrade[\"beta\"])) outputFile = open(self.outputFile, \"a\") content = \"%s,%s,openTime,%s,closeTime,%s,%s,%s,%s,%s,ratio_A,%s,%s,%s,%s,%s,ratio_B,%s,all_ratio,%s\\n\"%( closeTrade[\"pairKey\"], closeTrade[\"type\"], str(openTrade[\"dateTime\"]), str(closeTrade[\"dateTime\"]),", "self.pairDict, pairTradeStatus = self.pairTradeStatus) def firstDataTrigger(self, data): self.loadTrueTrade() #------------------------------ #执行策略方法 #------------------------------ #读取配对参数 def", "status[\"tradPoint\"][-1][\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"tradPoint\"][-1][\"pb\"] - status[\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"] ratio = (ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"])) status[\"tradPoint\"].append({ \"type\" : \"close\",", "pairTradeStatus = self.pairTradeStatus) def firstDataTrigger(self, data): self.loadTrueTrade() #------------------------------ #执行策略方法 #------------------------------ #读取配对参数 def loadPairPara(self):", ": float(line[6])} self.pairTradeStatus[line[0]] = { \"direction\" : 0, # 0 未开仓, 1 正方向,", "float(line[5]), \"stop\" : float(line[6])} self.pairTradeStatus[line[0]] = { \"direction\" : 0, # 0 未开仓,", "\"stock_B\" : pairKey[7:15], \"beta\" : para[\"beta\"], \"dateTime\" : data[\"dateTime\"], \"direction\" : 1, \"dirc_A\"", "\"type\" : \"close\", \"pairKey\" : pairKey, \"stock_A\" : pairKey[:6], \"stock_B\" : pairKey[7:15], \"beta\"", "pairPara in self.pairDict.items(): pa, pb = self.getStockCurPrice(pairPara[\"stock_A\"]), 
self.getStockCurPrice(pairPara[\"stock_B\"]) if pa and pb: volList_A,", "\"beta\" : para[\"beta\"], \"dateTime\" : data[\"dateTime\"], \"direction\" : 2, \"dirc_A\" : \"sell\", \"dirc_B\"", "status[\"tradPoint\"][-1][\"dirc_A\"] self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = 1 if value < -para[\"open\"]: #负 status[\"preOpenTime\"] =", "- status[\"tradPoint\"][-1][\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"] ratio = (ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"])) status[\"tradPoint\"].append({ \"type\" : \"close\", \"pairKey\" : pairKey, \"stock_A\"", "status[\"tradPoint\"][-2]) self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = 0 if value < -para[\"stop\"]: #止损 ratio_A =", "line in reader: if line: self.pairDict[line[0]] = { \"stock_A\" : line[0][:6], \"stock_B\" :", "for pairKey, pairPara in self.pairDict.items(): pa, pb = self.getStockCurPrice(pairPara[\"stock_A\"]), self.getStockCurPrice(pairPara[\"stock_B\"]) if pa and", "for key, value in self.pairTradeStatus.items(): try: if value[\"direction\"] != -1: try: value[\"direction\"] =", "\"dirc_A\" : \"buy\", \"dirc_B\" : \"sell\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"ratio_A\"", "def strategyEntrance(self, data): for pairKey, pairPara in self.pairDict.items(): pa, pb = self.getStockCurPrice(pairPara[\"stock_A\"]), self.getStockCurPrice(pairPara[\"stock_B\"])", "status[\"direction\"]: if value > para[\"open\"]: #正方向 status[\"preOpenTime\"] = copy.copy(data[\"dateTime\"]) status[\"tradPoint\"].append({ \"type\" : \"open\",", ": \"buy\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"vol_a\" : self.baseVol, \"vol_b\" :", "}) if para[\"beta\"] < 0: status[\"tradPoint\"][-1][\"dirc_B\"] = status[\"tradPoint\"][-1][\"dirc_A\"] self.creatTradingLog(status[\"tradPoint\"][-1], status[\"tradPoint\"][-2]) self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"]", "value[\"direction\"] = 0 except Exception: pass #获得股票价格 def getStockCurPrice(self, stockCode): if self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList: return", "open(self.outputFile, \"a\") content = \"%s,%s,openTime,%s,closeTime,%s,%s,%s,%s,%s,ratio_A,%s,%s,%s,%s,%s,ratio_B,%s,all_ratio,%s\\n\"%( closeTrade[\"pairKey\"], closeTrade[\"type\"], str(openTrade[\"dateTime\"]), str(closeTrade[\"dateTime\"]), closeTrade[\"stock_A\"], openTrade[\"dirc_A\"], openTrade[\"pa\"], closeTrade[\"pa\"],closeTrade[\"ratio_A\"],", "value < -para[\"open\"]: #负 status[\"preOpenTime\"] = copy.copy(data[\"dateTime\"]) status[\"tradPoint\"].append({ \"type\" : \"open\", \"pairKey\" :", "if closeTrade[\"beta\"] < 0: openTrade[\"dirc_B\"] = openTrade[\"dirc_A\"] closeTrade[\"dirc_B\"] = closeTrade[\"dirc_A\"] closeTrade[\"ratio_B\"] = -1*closeTrade[\"ratio_B\"]", "= 0 except Exception: pass #获得股票价格 def getStockCurPrice(self, stockCode): if self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList: return copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"close\"])", "\"pairTradeMultiple\" self.baseVol = 200 #基本开仓量 200手 self.outputFile = \".\\\\log\\\\tradPoint.csv\" self.pairDict = {} self.pairTradeStatus", "\"beta\" : para[\"beta\"], \"dateTime\" : data[\"dateTime\"], \"direction\" : 2, \"dirc_A\" : \"buy\", \"dirc_B\"", "\"stock_B\" : line[0][7:15], \"beta\" : float(line[1]), \"mean\" : float(line[2]), \"std\" : float(line[3]), \"open\"", "(closeTrade[\"ratio_A\"] + closeTrade[\"ratio_B\"]*np.abs(closeTrade[\"beta\"]))/(1+np.abs(closeTrade[\"beta\"])) 
outputFile = open(self.outputFile, \"a\") content = \"%s,%s,openTime,%s,closeTime,%s,%s,%s,%s,%s,ratio_A,%s,%s,%s,%s,%s,ratio_B,%s,all_ratio,%s\\n\"%( closeTrade[\"pairKey\"], closeTrade[\"type\"], str(openTrade[\"dateTime\"]),", "utf-8 -*- #pairTradeMultiple.py import baseMultiple import csv, copy, datetime, numpy as np class", "#------------------------------ #执行策略方法 #------------------------------ #读取配对参数 def loadPairPara(self): reader = csv.reader(open(\"filtPara.csv\")) for line in reader:", "\"mean\" : float(line[2]), \"std\" : float(line[3]), \"open\" : float(line[4]), \"close\" : float(line[5]), \"stop\"", "value, para, status): if not status[\"direction\"]: if value > para[\"open\"]: #正方向 status[\"preOpenTime\"] =", "\"dirc_B\" : \"sell\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"ratio_A\" : ratio_A, \"ratio_B\"", "onRtnMarketData(self, data): if self.isFirstData: self.firstDataTrigger(data) self.isFirstData = False self.sendMessage((1, data[\"dateTime\"])) self.strategyEntrance(data) def dayEnd(self):", "pb, value, volList_A, volList_B))) self.getTradeMessage(pairKey, data, value, pairPara, self.pairTradeStatus[pairKey]) #计算配对策略值 def getPairValue(self, pa,", "copy.copy(data[\"dateTime\"]) status[\"tradPoint\"].append({ \"type\" : \"open\", \"pairKey\" : pairKey, \"stock_A\" : pairKey[:6], \"stock_B\" :", "\"ratio_B\" : ratio_B, \"ratio\" : ratio }) if para[\"beta\"] < 0: status[\"tradPoint\"][-1][\"dirc_B\"] =", "self.firstDataTrigger(data) self.isFirstData = False self.sendMessage((1, data[\"dateTime\"])) self.strategyEntrance(data) def dayEnd(self): pass #自动保存缓存触发函数 def autosaveCache(self):", "para[\"close\"]: #平仓 ratio_A = (status[\"tradPoint\"][-1][\"pa\"] - status[\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"pb\"] - status[\"tradPoint\"][-1][\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"] ratio", "self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = -1 pass #创建交易日志 def creatTradingLog(self, closeTrade, openTrade): if closeTrade[\"beta\"]", "status[\"tradPoint\"][-1][\"dirc_B\"] = status[\"tradPoint\"][-1][\"dirc_A\"] self.creatTradingLog(status[\"tradPoint\"][-1], status[\"tradPoint\"][-2]) self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = 0 if value >", "volList return None #策略主入口 def strategyEntrance(self, data): for pairKey, pairPara in self.pairDict.items(): pa,", "= csv.reader(open(\"filtPara.csv\")) for line in reader: if line: self.pairDict[line[0]] = { \"stock_A\" :", "#pairTradeMultiple.py import baseMultiple import csv, copy, datetime, numpy as np class CPairTradeMultiple(baseMultiple.CBaseMultiple): #------------------------------", "self.pairTradeStatus[pairKey]) #计算配对策略值 def getPairValue(self, pa, pb, para): St = np.log(pa) - para[\"beta\"]*np.log(pb) S", "= (ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"])) status[\"tradPoint\"].append({ \"type\" : \"stop\", \"pairKey\" : pairKey, \"stock_A\" : pairKey[:6], \"stock_B\"", "data[\"dateTime\"])) self.strategyEntrance(data) def dayEnd(self): pass #自动保存缓存触发函数 def autosaveCache(self): self.saveCache(pairDict = self.pairDict, pairTradeStatus =", "\"ratio\" : ratio }) if para[\"beta\"] < 0: status[\"tradPoint\"][-1][\"dirc_B\"] = status[\"tradPoint\"][-1][\"dirc_A\"] self.creatTradingLog(status[\"tradPoint\"][-1], status[\"tradPoint\"][-2])", "- status[\"tradPoint\"][-1][\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"tradPoint\"][-1][\"pb\"] - status[\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"] 
ratio = (ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"])) status[\"tradPoint\"].append({ \"type\" :", ": line[0][:6], \"stock_B\" : line[0][7:15], \"beta\" : float(line[1]), \"mean\" : float(line[2]), \"std\" :", "= 0 if value > para[\"stop\"]: #止损 ratio_A = (status[\"tradPoint\"][-1][\"pa\"] - status[\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B", "para[\"beta\"], \"dateTime\" : data[\"dateTime\"], \"direction\" : 1, \"dirc_A\" : \"buy\", \"dirc_B\" : \"sell\",", "0 未开仓, 1 正方向, 2 负方向, -1 不要做了 \"tradPoint\" : [] } self.sendMessage((0,", "self.baseVol = 200 #基本开仓量 200手 self.outputFile = \".\\\\log\\\\tradPoint.csv\" self.pairDict = {} self.pairTradeStatus =", "numpy as np class CPairTradeMultiple(baseMultiple.CBaseMultiple): #------------------------------ #继承重载函数 #------------------------------ #自定义初始化函数 def customInit(self): self.name =", "\"dateTime\" : data[\"dateTime\"], \"direction\" : 2, \"dirc_A\" : \"sell\", \"dirc_B\" : \"buy\", \"pa\"", "\"stock_B\" : pairKey[7:15], \"beta\" : para[\"beta\"], \"dateTime\" : data[\"dateTime\"], \"direction\" : 2, \"dirc_A\"", "self.outputFile = \".\\\\log\\\\tradPoint.csv\" self.pairDict = {} self.pairTradeStatus = {} self.loadPairPara() self.isFirstData = True", "closeTrade[\"dirc_B\"] = closeTrade[\"dirc_A\"] closeTrade[\"ratio_B\"] = -1*closeTrade[\"ratio_B\"] closeTrade[\"ratio\"] = (closeTrade[\"ratio_A\"] + closeTrade[\"ratio_B\"]*np.abs(closeTrade[\"beta\"]))/(1+np.abs(closeTrade[\"beta\"])) outputFile =", "pairKey[7:15], \"beta\" : para[\"beta\"], \"dateTime\" : data[\"dateTime\"], \"direction\" : 1, \"dirc_A\" : \"sell\",", "in self.pairTradeStatus.items(): try: if value[\"direction\"] != -1: try: value[\"direction\"] = self.positionsPair[key][-1][\"direction\"] value[\"tradPoint\"].append(self.positionsPair[key][-1]) except", "data, value, para, status): if not status[\"direction\"]: if value > para[\"open\"]: #正方向 status[\"preOpenTime\"]", "def getStockCurPrice(self, stockCode): if self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList: return copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"close\"]) return None #获得股票行数数据 def getStockOfferStatus(self, stockCode):", "= -1 elif status[\"direction\"] == 2: #负方向 if value > -para[\"close\"]: #平仓 ratio_A", "ratio_B = (status[\"pb\"] - status[\"tradPoint\"][-1][\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"] ratio = (ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"])) status[\"tradPoint\"].append({ \"type\" : \"stop\", \"pairKey\"", "St = np.log(pa) - para[\"beta\"]*np.log(pb) S = (St - para[\"mean\"])/para[\"std\"] return S #计算开平仓信号", "para[\"beta\"], \"dateTime\" : data[\"dateTime\"], \"direction\" : 1, \"dirc_A\" : \"sell\", \"dirc_B\" : \"buy\",", "#自定义初始化函数 def customInit(self): self.name = \"pairTradeMultiple\" self.baseVol = 200 #基本开仓量 200手 self.outputFile =", "= \".\\\\log\\\\tradPoint.csv\" self.pairDict = {} self.pairTradeStatus = {} self.loadPairPara() self.isFirstData = True #行情数据触发函数", "self.saveCache(pairDict = self.pairDict, pairTradeStatus = self.pairTradeStatus) def firstDataTrigger(self, data): self.loadTrueTrade() #------------------------------ #执行策略方法 #------------------------------", ": 2, \"dirc_A\" : \"buy\", \"dirc_B\" : \"sell\", \"pa\" : status[\"pa\"], \"pb\" :", ": \"sell\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"vol_a\" : self.baseVol, \"vol_b\" :", "pairKey[7:15], \"beta\" : para[\"beta\"], \"dateTime\" : data[\"dateTime\"], \"direction\" : 2, \"dirc_A\" : 
\"sell\",", "return S #计算开平仓信号 def getTradeMessage(self, pairKey, data, value, para, status): if not status[\"direction\"]:", "def onRtnMarketData(self, data): if self.isFirstData: self.firstDataTrigger(data) self.isFirstData = False self.sendMessage((1, data[\"dateTime\"])) self.strategyEntrance(data) def", "pass #创建交易日志 def creatTradingLog(self, closeTrade, openTrade): if closeTrade[\"beta\"] < 0: openTrade[\"dirc_B\"] = openTrade[\"dirc_A\"]", "\"beta\" : para[\"beta\"], \"dateTime\" : data[\"dateTime\"], \"direction\" : 1, \"dirc_A\" : \"buy\", \"dirc_B\"", "\"dirc_B\" : \"buy\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"vol_a\" : self.baseVol, \"vol_b\"", "= -1 pass #创建交易日志 def creatTradingLog(self, closeTrade, openTrade): if closeTrade[\"beta\"] < 0: openTrade[\"dirc_B\"]", "\"dirc_A\" : \"buy\", \"dirc_B\" : \"sell\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"vol_a\"", ": pairKey[7:15], \"beta\" : para[\"beta\"], \"dateTime\" : data[\"dateTime\"], \"direction\" : 2, \"dirc_A\" :", "\"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"ratio_A\" : ratio_A, \"ratio_B\" : ratio_B, \"ratio\"", "#平仓 ratio_A = (status[\"tradPoint\"][-1][\"pa\"] - status[\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"pb\"] - status[\"tradPoint\"][-1][\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"] ratio =", "\"sell\", \"dirc_B\" : \"buy\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"ratio_A\" : ratio_A,", "(status[\"pb\"] - status[\"tradPoint\"][-1][\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"] ratio = (ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"])) status[\"tradPoint\"].append({ \"type\" : \"close\", \"pairKey\" : pairKey,", "self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList: ask = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"askPrice\"]), copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"askVol\"])))[:5] bid = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"bidPrice\"]), copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"bidVol\"])))[:5] volList = ask +", "status[\"direction\"] = 2 elif status[\"direction\"] == 1: #正方向 if value < para[\"close\"]: #平仓", "0: status[\"tradPoint\"][-1][\"dirc_B\"] = status[\"tradPoint\"][-1][\"dirc_A\"] self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = 2 elif status[\"direction\"] == 1:", "pb, para): St = np.log(pa) - para[\"beta\"]*np.log(pb) S = (St - para[\"mean\"])/para[\"std\"] return", "class CPairTradeMultiple(baseMultiple.CBaseMultiple): #------------------------------ #继承重载函数 #------------------------------ #自定义初始化函数 def customInit(self): self.name = \"pairTradeMultiple\" self.baseVol =", "in reader: if line: self.pairDict[line[0]] = { \"stock_A\" : line[0][:6], \"stock_B\" : line[0][7:15],", "= np.log(pa) - para[\"beta\"]*np.log(pb) S = (St - para[\"mean\"])/para[\"std\"] return S #计算开平仓信号 def", "= 200 #基本开仓量 200手 self.outputFile = \".\\\\log\\\\tradPoint.csv\" self.pairDict = {} self.pairTradeStatus = {}", "self.positionsPair[key][-1][\"direction\"] value[\"tradPoint\"].append(self.positionsPair[key][-1]) except Exception: value[\"direction\"] = 0 except Exception: pass #获得股票价格 def getStockCurPrice(self,", "value in self.pairTradeStatus.items(): try: if value[\"direction\"] != -1: try: value[\"direction\"] = self.positionsPair[key][-1][\"direction\"] value[\"tradPoint\"].append(self.positionsPair[key][-1])", 
"== 2: #负方向 if value > -para[\"close\"]: #平仓 ratio_A = (status[\"pa\"] - status[\"tradPoint\"][-1][\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"]", "self.sendMessage((1, data[\"dateTime\"])) self.strategyEntrance(data) def dayEnd(self): pass #自动保存缓存触发函数 def autosaveCache(self): self.saveCache(pairDict = self.pairDict, pairTradeStatus", "pass #获得股票价格 def getStockCurPrice(self, stockCode): if self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList: return copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"close\"]) return None #获得股票行数数据 def", "return None #获得股票行数数据 def getStockOfferStatus(self, stockCode): if self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList: ask = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"askPrice\"]), copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"askVol\"])))[:5] bid", "value, volList_A, volList_B))) self.getTradeMessage(pairKey, data, value, pairPara, self.pairTradeStatus[pairKey]) #计算配对策略值 def getPairValue(self, pa, pb,", ": \"stop\", \"pairKey\" : pairKey, \"stock_A\" : pairKey[:6], \"stock_B\" : pairKey[7:15], \"beta\" :", "status[\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"pb\"] - status[\"tradPoint\"][-1][\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"] ratio = (ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"])) status[\"tradPoint\"].append({ \"type\" : \"stop\",", "if self.isFirstData: self.firstDataTrigger(data) self.isFirstData = False self.sendMessage((1, data[\"dateTime\"])) self.strategyEntrance(data) def dayEnd(self): pass #自动保存缓存触发函数", "1, \"dirc_A\" : \"sell\", \"dirc_B\" : \"buy\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"],", ": status[\"pa\"], \"pb\" : status[\"pb\"], \"vol_a\" : self.baseVol, \"vol_b\" : self.baseVol*status[\"pa\"]*para[\"beta\"]/status[\"pb\"] }) if", "\"sell\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"vol_a\" : self.baseVol, \"vol_b\" : self.baseVol*status[\"pa\"]*para[\"beta\"]/status[\"pb\"]", "-1 elif status[\"direction\"] == 2: #负方向 if value > -para[\"close\"]: #平仓 ratio_A =", "self.baseVol*status[\"pa\"]*para[\"beta\"]/status[\"pb\"] }) if para[\"beta\"] < 0: status[\"tradPoint\"][-1][\"dirc_B\"] = status[\"tradPoint\"][-1][\"dirc_A\"] self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] =", ": data[\"dateTime\"], \"direction\" : 2, \"dirc_A\" : \"buy\", \"dirc_B\" : \"sell\", \"pa\" :", "200 #基本开仓量 200手 self.outputFile = \".\\\\log\\\\tradPoint.csv\" self.pairDict = {} self.pairTradeStatus = {} self.loadPairPara()", "status[\"tradPoint\"][-1][\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"] ratio = (ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"])) status[\"tradPoint\"].append({ \"type\" : \"stop\", \"pairKey\" : pairKey, \"stock_A\" :", "-1: try: value[\"direction\"] = self.positionsPair[key][-1][\"direction\"] value[\"tradPoint\"].append(self.positionsPair[key][-1]) except Exception: value[\"direction\"] = 0 except Exception:", "\"vol_b\" : self.baseVol*status[\"pa\"]*para[\"beta\"]/status[\"pb\"] }) if para[\"beta\"] < 0: status[\"tradPoint\"][-1][\"dirc_B\"] = status[\"tradPoint\"][-1][\"dirc_A\"] self.sendMessage((3, status[\"tradPoint\"][-1]))", "- status[\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"] ratio = (ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"])) status[\"tradPoint\"].append({ \"type\" : \"stop\", \"pairKey\" : pairKey, \"stock_A\"", 
"status[\"tradPoint\"][-1][\"dirc_A\"] self.creatTradingLog(status[\"tradPoint\"][-1], status[\"tradPoint\"][-2]) self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = 0 if value < -para[\"stop\"]: #止损", "sorted(volList, key=lambda d:d[0], reverse=True) return volList return None #策略主入口 def strategyEntrance(self, data): for", "if value < para[\"close\"]: #平仓 ratio_A = (status[\"tradPoint\"][-1][\"pa\"] - status[\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"pb\"]", "\"open\", \"pairKey\" : pairKey, \"stock_A\" : pairKey[:6], \"stock_B\" : pairKey[7:15], \"beta\" : para[\"beta\"],", "status[\"direction\"] == 1: #正方向 if value < para[\"close\"]: #平仓 ratio_A = (status[\"tradPoint\"][-1][\"pa\"] -", ": \"close\", \"pairKey\" : pairKey, \"stock_A\" : pairKey[:6], \"stock_B\" : pairKey[7:15], \"beta\" :", "pa and pb: volList_A, volList_B = self.getStockOfferStatus(pairPara[\"stock_A\"]), self.getStockOfferStatus(pairPara[\"stock_B\"]) self.pairTradeStatus[pairKey][\"pa\"], self.pairTradeStatus[pairKey][\"pb\"] = pa, pb", "if para[\"beta\"] < 0: status[\"tradPoint\"][-1][\"dirc_B\"] = status[\"tradPoint\"][-1][\"dirc_A\"] self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = 2 elif", "if value > para[\"stop\"]: #止损 ratio_A = (status[\"tradPoint\"][-1][\"pa\"] - status[\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"pb\"]", "= ask + bid volList = sorted(volList, key=lambda d:d[0], reverse=True) return volList return", "+ bid volList = sorted(volList, key=lambda d:d[0], reverse=True) return volList return None #策略主入口", "> para[\"stop\"]: #止损 ratio_A = (status[\"tradPoint\"][-1][\"pa\"] - status[\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"pb\"] - status[\"tradPoint\"][-1][\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"]", "#行情数据触发函数 def onRtnMarketData(self, data): if self.isFirstData: self.firstDataTrigger(data) self.isFirstData = False self.sendMessage((1, data[\"dateTime\"])) self.strategyEntrance(data)", "stockCode): if self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList: return copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"close\"]) return None #获得股票行数数据 def getStockOfferStatus(self, stockCode): if self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList:", "\"dateTime\" : data[\"dateTime\"], \"direction\" : 1, \"dirc_A\" : \"buy\", \"dirc_B\" : \"sell\", \"pa\"", "pb: volList_A, volList_B = self.getStockOfferStatus(pairPara[\"stock_A\"]), self.getStockOfferStatus(pairPara[\"stock_B\"]) self.pairTradeStatus[pairKey][\"pa\"], self.pairTradeStatus[pairKey][\"pb\"] = pa, pb value =", "status[\"tradPoint\"][-1])) status[\"direction\"] = 2 elif status[\"direction\"] == 1: #正方向 if value < para[\"close\"]:", ": 0, # 0 未开仓, 1 正方向, 2 负方向, -1 不要做了 \"tradPoint\" :", "= openTrade[\"dirc_A\"] closeTrade[\"dirc_B\"] = closeTrade[\"dirc_A\"] closeTrade[\"ratio_B\"] = -1*closeTrade[\"ratio_B\"] closeTrade[\"ratio\"] = (closeTrade[\"ratio_A\"] + closeTrade[\"ratio_B\"]*np.abs(closeTrade[\"beta\"]))/(1+np.abs(closeTrade[\"beta\"]))", "self.pairTradeStatus[pairKey][\"pa\"], self.pairTradeStatus[pairKey][\"pb\"] = pa, pb value = self.getPairValue(pa, pb, pairPara) #发送参数信号 self.sendMessage((2, (pairKey,", "#发送参数信号 self.sendMessage((2, (pairKey, data[\"dateTime\"],pa, pb, value, volList_A, volList_B))) self.getTradeMessage(pairKey, data, value, pairPara, self.pairTradeStatus[pairKey])", "value = self.getPairValue(pa, pb, pairPara) #发送参数信号 
self.sendMessage((2, (pairKey, data[\"dateTime\"],pa, pb, value, volList_A, volList_B)))", "\"stock_A\" : pairKey[:6], \"stock_B\" : pairKey[7:15], \"beta\" : para[\"beta\"], \"dateTime\" : data[\"dateTime\"], \"direction\"", "(ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"])) status[\"tradPoint\"].append({ \"type\" : \"stop\", \"pairKey\" : pairKey, \"stock_A\" : pairKey[:6], \"stock_B\" :", "!= -1: try: value[\"direction\"] = self.positionsPair[key][-1][\"direction\"] value[\"tradPoint\"].append(self.positionsPair[key][-1]) except Exception: value[\"direction\"] = 0 except", "if value[\"direction\"] != -1: try: value[\"direction\"] = self.positionsPair[key][-1][\"direction\"] value[\"tradPoint\"].append(self.positionsPair[key][-1]) except Exception: value[\"direction\"] =", "status[\"tradPoint\"].append({ \"type\" : \"stop\", \"pairKey\" : pairKey, \"stock_A\" : pairKey[:6], \"stock_B\" : pairKey[7:15],", "status[\"pa\"], \"pb\" : status[\"pb\"], \"vol_a\" : self.baseVol, \"vol_b\" : self.baseVol*status[\"pa\"]*para[\"beta\"]/status[\"pb\"] }) if para[\"beta\"]", "status[\"tradPoint\"][-2]) self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = -1 pass #创建交易日志 def creatTradingLog(self, closeTrade, openTrade): if", "line[0][7:15], \"beta\" : float(line[1]), \"mean\" : float(line[2]), \"std\" : float(line[3]), \"open\" : float(line[4]),", "ratio = (ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"])) status[\"tradPoint\"].append({ \"type\" : \"stop\", \"pairKey\" : pairKey, \"stock_A\" : pairKey[:6],", "ratio_A = (status[\"pa\"] - status[\"tradPoint\"][-1][\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"tradPoint\"][-1][\"pb\"] - status[\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"] ratio = (ratio_A+ratio_B*np.abs(para[\"beta\"]))/(1+np.abs(para[\"beta\"]))", "True #行情数据触发函数 def onRtnMarketData(self, data): if self.isFirstData: self.firstDataTrigger(data) self.isFirstData = False self.sendMessage((1, data[\"dateTime\"]))", "ratio_B, \"ratio\" : ratio }) if para[\"beta\"] < 0: status[\"tradPoint\"][-1][\"dirc_B\"] = status[\"tradPoint\"][-1][\"dirc_A\"] self.creatTradingLog(status[\"tradPoint\"][-1],", "openTrade[\"dirc_B\"] = openTrade[\"dirc_A\"] closeTrade[\"dirc_B\"] = closeTrade[\"dirc_A\"] closeTrade[\"ratio_B\"] = -1*closeTrade[\"ratio_B\"] closeTrade[\"ratio\"] = (closeTrade[\"ratio_A\"] +", "stockCode): if self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList: ask = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"askPrice\"]), copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"askVol\"])))[:5] bid = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"bidPrice\"]), copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"bidVol\"])))[:5] volList =", "\"beta\" : float(line[1]), \"mean\" : float(line[2]), \"std\" : float(line[3]), \"open\" : float(line[4]), \"close\"", "#计算开平仓信号 def getTradeMessage(self, pairKey, data, value, para, status): if not status[\"direction\"]: if value", "key=lambda d:d[0], reverse=True) return volList return None #策略主入口 def strategyEntrance(self, data): for pairKey,", "as np class CPairTradeMultiple(baseMultiple.CBaseMultiple): #------------------------------ #继承重载函数 #------------------------------ #自定义初始化函数 def customInit(self): self.name = \"pairTradeMultiple\"", "if not status[\"direction\"]: if value > para[\"open\"]: #正方向 
    #------------------------------
    # Strategy implementation
    #------------------------------
    # Load the pair parameters
    def loadPairPara(self):
        reader = csv.reader(open("filtPara.csv"))
        for line in reader:
            if line:
                self.pairDict[line[0]] = {
                    "stock_A" : line[0][:6],
                    "stock_B" : line[0][7:15],
                    "beta"  : float(line[1]),
                    "mean"  : float(line[2]),
                    "std"   : float(line[3]),
                    "open"  : float(line[4]),
                    "close" : float(line[5]),
                    "stop"  : float(line[6])}
                self.pairTradeStatus[line[0]] = {
                    "direction" : 0,  # 0 flat, 1 positive direction, 2 negative direction, -1 stopped out -- do not trade again
                    "tradPoint" : []
                }
        self.sendMessage((0, self.pairDict))
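    # Illustrative note (added): each row of filtPara.csv is expected to carry
    # pairKey, beta, mean, std, open, close, stop, with the two stock codes
    # embedded in the pair key (slices [:6] and [7:15]). A hypothetical row:
    #
    #   600000_000001SZ,0.85,0.05,0.02,2.0,0.5,3.0
    #
    # The exact key format is an assumption; only the slice positions are
    # implied by the code above.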
    def loadTrueTrade(self):
        execfile(".\\log\\tureTrade.log")
        for key, value in self.pairTradeStatus.items():
            try:
                if value["direction"] != -1:
                    try:
                        value["direction"] = self.positionsPair[key][-1]["direction"]
                        value["tradPoint"].append(self.positionsPair[key][-1])
                    except Exception:
                        value["direction"] = 0
            except Exception:
                pass

    # Get the current stock price
    def getStockCurPrice(self, stockCode):
        if self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList:
            return copy.copy(self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList[-1]["close"])
        return None
    # Get the stock's quote (order-book) data
    def getStockOfferStatus(self, stockCode):
        if self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList:
            ask = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList[-1]["askPrice"]),
                        copy.copy(self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList[-1]["askVol"])))[:5]
            bid = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList[-1]["bidPrice"]),
                        copy.copy(self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList[-1]["bidVol"])))[:5]
            volList = ask + bid
            volList = sorted(volList, key=lambda d: d[0], reverse=True)
            return volList
        return None
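    # Note (added): each zip(...) pairs price levels with their volumes and keeps
    # the top five ask and top five bid levels; concatenating and sorting by price
    # in descending order yields a single ten-level (price, volume) ladder. This
    # relies on Python 2 semantics, where zip() returns a list that can be sliced.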
    # Main strategy entry point
    def strategyEntrance(self, data):
        for pairKey, pairPara in self.pairDict.items():
            pa, pb = self.getStockCurPrice(pairPara["stock_A"]), self.getStockCurPrice(pairPara["stock_B"])
            if pa and pb:
                volList_A, volList_B = self.getStockOfferStatus(pairPara["stock_A"]), self.getStockOfferStatus(pairPara["stock_B"])
                self.pairTradeStatus[pairKey]["pa"], self.pairTradeStatus[pairKey]["pb"] = pa, pb
                value = self.getPairValue(pa, pb, pairPara)
                # Broadcast the signal parameters
                self.sendMessage((2, (pairKey, data["dateTime"], pa, pb, value, volList_A, volList_B)))
                self.getTradeMessage(pairKey, data, value, pairPara, self.pairTradeStatus[pairKey])

    # Compute the pair-strategy value
    def getPairValue(self, pa, pb, para):
        St = np.log(pa) - para["beta"]*np.log(pb)
        S = (St - para["mean"])/para["std"]
        return S
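    # Worked example (added, hypothetical numbers): with beta = 0.8, mean = 0.05,
    # std = 0.02 and prices pa = 10.0, pb = 12.0:
    #   St = ln(10.0) - 0.8*ln(12.0) = 2.3026 - 1.9879 = 0.3147 (approx.)
    #   S  = (0.3147 - 0.05)/0.02 = 13.2 (approx.)
    # S is therefore the z-score of the log-price spread: how many standard
    # deviations it sits from its historical mean. The open/close/stop
    # thresholds from filtPara.csv are expressed in these units.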
    # Compute open/close signals
    def getTradeMessage(self, pairKey, data, value, para, status):
        if not status["direction"]:
            if value > para["open"]:  # positive direction
                status["preOpenTime"] = copy.copy(data["dateTime"])
                status["tradPoint"].append({
                    "type" : "open",
                    "pairKey" : pairKey,
                    "stock_A" : pairKey[:6],
                    "stock_B" : pairKey[7:15],
                    "beta" : para["beta"],
                    "dateTime" : data["dateTime"],
                    "direction" : 1,
                    "dirc_A" : "sell",
                    "dirc_B" : "buy",
                    "pa" : status["pa"],
                    "pb" : status["pb"],
                    "vol_a" : self.baseVol,
                    "vol_b" : self.baseVol*status["pa"]*para["beta"]/status["pb"]
                })
                if para["beta"] < 0:
                    status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
                self.sendMessage((3, status["tradPoint"][-1]))
                status["direction"] = 1
            if value < -para["open"]:  # negative direction
                status["preOpenTime"] = copy.copy(data["dateTime"])
                status["tradPoint"].append({
                    "type" : "open",
                    "pairKey" : pairKey,
                    "stock_A" : pairKey[:6],
                    "stock_B" : pairKey[7:15],
                    "beta" : para["beta"],
                    "dateTime" : data["dateTime"],
                    "direction" : 2,
                    "dirc_A" : "buy",
                    "dirc_B" : "sell",
                    "pa" : status["pa"],
                    "pb" : status["pb"],
                    "vol_a" : self.baseVol,
                    "vol_b" : self.baseVol*status["pa"]*para["beta"]/status["pb"]
                })
                if para["beta"] < 0:
                    status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
                self.sendMessage((3, status["tradPoint"][-1]))
                status["direction"] = 2
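        # Note (added): vol_b = baseVol*pa*beta/pb sizes leg B so that
        # vol_b*pb equals beta times leg A's notional (baseVol*pa), matching
        # the hedge ratio used in the spread St = ln(pa) - beta*ln(pb).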
        elif status["direction"] == 1:  # positive direction
            if value < para["close"]:  # close the position
                ratio_A = (status["tradPoint"][-1]["pa"] - status["pa"])*0.9992/status["tradPoint"][-1]["pa"]
                ratio_B = (status["pb"] - status["tradPoint"][-1]["pb"])*0.9992/status["tradPoint"][-1]["pb"]
                ratio = (ratio_A + ratio_B*np.abs(para["beta"]))/(1 + np.abs(para["beta"]))
                status["tradPoint"].append({
                    "type" : "close",
                    "pairKey" : pairKey,
                    "stock_A" : pairKey[:6],
                    "stock_B" : pairKey[7:15],
                    "beta" : para["beta"],
                    "dateTime" : data["dateTime"],
                    "direction" : 1,
                    "dirc_A" : "buy",
                    "dirc_B" : "sell",
                    "pa" : status["pa"],
                    "pb" : status["pb"],
                    "ratio_A" : ratio_A,
                    "ratio_B" : ratio_B,
                    "ratio" : ratio
                })
                if para["beta"] < 0:
                    status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
                self.creatTradingLog(status["tradPoint"][-1], status["tradPoint"][-2])
                self.sendMessage((3, status["tradPoint"][-1]))
                status["direction"] = 0
            if value > para["stop"]:  # stop loss
                ratio_A = (status["tradPoint"][-1]["pa"] - status["pa"])*0.9992/status["tradPoint"][-1]["pa"]
                ratio_B = (status["pb"] - status["tradPoint"][-1]["pb"])*0.9992/status["tradPoint"][-1]["pb"]
                ratio = (ratio_A + ratio_B*np.abs(para["beta"]))/(1 + np.abs(para["beta"]))
                status["tradPoint"].append({
                    "type" : "stop",
                    "pairKey" : pairKey,
                    "stock_A" : pairKey[:6],
                    "stock_B" : pairKey[7:15],
                    "beta" : para["beta"],
                    "dateTime" : data["dateTime"],
                    "direction" : 1,
                    "dirc_A" : "buy",
                    "dirc_B" : "sell",
                    "pa" : status["pa"],
                    "pb" : status["pb"],
                    "ratio_A" : ratio_A,
                    "ratio_B" : ratio_B,
                    "ratio" : ratio
                })
                if para["beta"] < 0:
                    status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
                self.creatTradingLog(status["tradPoint"][-1], status["tradPoint"][-2])
                self.sendMessage((3, status["tradPoint"][-1]))
                status["direction"] = -1
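        # Note (added): the 0.9992 factor haircuts each leg's return by a fixed
        # 0.08%, which looks like a built-in round-trip cost estimate -- an
        # interpretation, since it is not documented in the source. "ratio" is
        # then the beta-weighted average of the two legs' returns.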
        elif status["direction"] == 2:  # negative direction
            if value > -para["close"]:  # close the position
                ratio_A = (status["pa"] - status["tradPoint"][-1]["pa"])*0.9992/status["tradPoint"][-1]["pa"]
                ratio_B = (status["tradPoint"][-1]["pb"] - status["pb"])*0.9992/status["tradPoint"][-1]["pb"]
                ratio = (ratio_A + ratio_B*np.abs(para["beta"]))/(1 + np.abs(para["beta"]))
                status["tradPoint"].append({
                    "type" : "close",
                    "pairKey" : pairKey,
                    "stock_A" : pairKey[:6],
                    "stock_B" : pairKey[7:15],
                    "beta" : para["beta"],
                    "dateTime" : data["dateTime"],
                    "direction" : 2,
                    "dirc_A" : "sell",
                    "dirc_B" : "buy",
                    "pa" : status["pa"],
                    "pb" : status["pb"],
                    "ratio_A" : ratio_A,
                    "ratio_B" : ratio_B,
                    "ratio" : ratio
                })
                if para["beta"] < 0:
                    status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
                self.creatTradingLog(status["tradPoint"][-1], status["tradPoint"][-2])
                self.sendMessage((3, status["tradPoint"][-1]))
                status["direction"] = 0
            if value < -para["stop"]:  # stop loss
                ratio_A = (status["pa"] - status["tradPoint"][-1]["pa"])*0.9992/status["tradPoint"][-1]["pa"]
                ratio_B = (status["tradPoint"][-1]["pb"] - status["pb"])*0.9992/status["tradPoint"][-1]["pb"]
                ratio = (ratio_A + ratio_B*np.abs(para["beta"]))/(1 + np.abs(para["beta"]))
                status["tradPoint"].append({
                    "type" : "stop",
                    "pairKey" : pairKey,
                    "stock_A" : pairKey[:6],
                    "stock_B" : pairKey[7:15],
                    "beta" : para["beta"],
                    "dateTime" : data["dateTime"],
                    "direction" : 2,
                    "dirc_A" : "sell",
                    "dirc_B" : "buy",
                    "pa" : status["pa"],
                    "pb" : status["pb"],
                    "ratio_A" : ratio_A,
                    "ratio_B" : ratio_B,
                    "ratio" : ratio
                })
                if para["beta"] < 0:
                    status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
                self.creatTradingLog(status["tradPoint"][-1], status["tradPoint"][-2])
                self.sendMessage((3, status["tradPoint"][-1]))
                status["direction"] = -1
        pass
zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"askPrice\"]), copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"askVol\"])))[:5] bid = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"bidPrice\"]), copy.copy(self.actuatorDict[stockCode].signalObjDict[\"baseSignal\"].MDList[-1][\"bidVol\"])))[:5] volList = ask + bid volList", "> -para[\"close\"]: #平仓 ratio_A = (status[\"pa\"] - status[\"tradPoint\"][-1][\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B = (status[\"tradPoint\"][-1][\"pb\"] - status[\"pb\"])*0.9992/status[\"tradPoint\"][-1][\"pb\"]", "\"dirc_A\" : \"sell\", \"dirc_B\" : \"buy\", \"pa\" : status[\"pa\"], \"pb\" : status[\"pb\"], \"ratio_A\"", "line: self.pairDict[line[0]] = { \"stock_A\" : line[0][:6], \"stock_B\" : line[0][7:15], \"beta\" : float(line[1]),", "= self.getPairValue(pa, pb, pairPara) #发送参数信号 self.sendMessage((2, (pairKey, data[\"dateTime\"],pa, pb, value, volList_A, volList_B))) self.getTradeMessage(pairKey,", "#负 status[\"preOpenTime\"] = copy.copy(data[\"dateTime\"]) status[\"tradPoint\"].append({ \"type\" : \"open\", \"pairKey\" : pairKey, \"stock_A\" :", "= status[\"tradPoint\"][-1][\"dirc_A\"] self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = 1 if value < -para[\"open\"]: #负 status[\"preOpenTime\"]", "float(line[2]), \"std\" : float(line[3]), \"open\" : float(line[4]), \"close\" : float(line[5]), \"stop\" : float(line[6])}", "status[\"tradPoint\"].append({ \"type\" : \"open\", \"pairKey\" : pairKey, \"stock_A\" : pairKey[:6], \"stock_B\" : pairKey[7:15],", "#负方向 if value > -para[\"close\"]: #平仓 ratio_A = (status[\"pa\"] - status[\"tradPoint\"][-1][\"pa\"])*0.9992/status[\"tradPoint\"][-1][\"pa\"] ratio_B =", "data[\"dateTime\"], \"direction\" : 1, \"dirc_A\" : \"sell\", \"dirc_B\" : \"buy\", \"pa\" : status[\"pa\"],", "para[\"beta\"] < 0: status[\"tradPoint\"][-1][\"dirc_B\"] = status[\"tradPoint\"][-1][\"dirc_A\"] self.sendMessage((3, status[\"tradPoint\"][-1])) status[\"direction\"] = 1 if value" ]
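# --------------------------------------------------------------------------
# Illustrative sketch (not part of the strategy above): a standalone version
# of the z-score that getPairValue() computes, with made-up parameter values,
# showing how the "open"/"close"/"stop" thresholds from filtPara.csv
# partition the signal.
import numpy as np

def pair_value(pa, pb, beta, mean, std):
    # z-score of the log-price spread ln(pa) - beta*ln(pb)
    return (np.log(pa) - beta * np.log(pb) - mean) / std

para = {"beta": 0.8, "mean": 0.30, "std": 0.02,   # made-up values
        "open": 2.0, "close": 0.5, "stop": 3.5}
S = pair_value(10.40, 12.10, para["beta"], para["mean"], para["std"])
if abs(S) > para["stop"]:
    print("stop-loss region, S = %.2f" % S)
elif abs(S) > para["open"]:
    print("open a position, S = %.2f" % S)    # this branch fires: S is about 2.36
elif abs(S) < para["close"]:
    print("flat/close region, S = %.2f" % S)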
[ "if userResponse.decode('utf-8')[:2] == 'OK': with open(Nom_Fichier, 'rb') as f: bytesToSend = f.read(1024) sendedData", "acceder au repertoir transmis par le client ( cd Nom_Fichier) # os.chdir renvoie", "de la commande Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1] if(Type_Commande==0):# commande de deconnexion ConnexionAUnClient.send(b\"200 k, bye\") #", "L'envoie d'une sequence \"--\\r\\n\\r\\n\" delimite la fin d'une reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") # Sortir de", "a aucune des commandes permises # transmettre le code 400 ConnexionAUnClient.send((\"400 Commande inconue", "client : \"Type_Commande:NomFichier\" # Type_Commande == 3 changer le repertoire courant (Change Working", "commande de deconnexion ConnexionAUnClient.send(b\"200 k, bye\") # L'envoie d'une sequence \"--\\r\\n\\r\\n\" delimite la", "repertoire racine os.chdir(Chemin_racine) print(\"Connexion de la machine = \", addrclient) while True: #", "< taille: bytesToSend = f.read(1024) ConnexionAUnClient.send(bytesToSend) sendedData += len(bytesToSend) print(\"Done File\") else: ConnexionAUnClient.sendall(b\"ERR", "os.listdir(\".\") revoie la liste des fichier / repertoires dans le repertoire courant (comme", "# Python 3! import socket import os import re import _thread def TraitNewConnection(ConnexionAUnClient,addrclient):", "with open(Nom_Fichier, 'rb') as f: bytesToSend = f.read(1024) sendedData = len(bytesToSend) ConnexionAUnClient.send(bytesToSend) while", "= f.read(1024) sendedData = len(bytesToSend) ConnexionAUnClient.send(bytesToSend) while sendedData < taille: bytesToSend = f.read(1024)", "k, bye\") # L'envoie d'une sequence \"--\\r\\n\\r\\n\" delimite la fin d'une reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\")", "par le client est inexistant) # transmettre le code 404 ConnexionAUnClient.send((\"404 Le repertoire", "code 400 ConnexionAUnClient.send((\"400 Commande inconue = \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: try: # envoie de", "client souhaite deconnecter # par exemple \"0:\", \"0:CYA\" et \"0:leaving\" signifient deconnexion try:", "/ (tentative de parcourir les repertoires ) if( \"..\" in Nom_Fichier or \"/\"", "repertoir en parametre try: # acceder au repertoir transmis par le client (", "'OK': with open(Nom_Fichier, 'rb') as f: bytesToSend = f.read(1024) sendedData = len(bytesToSend) ConnexionAUnClient.send(bytesToSend)", "de fichier # Nom_Fichier ne doit pas contenir \"..\" ou / (tentative de", "ConnexionAUnClient.send((\"401 Format de commande incorrect : \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: pass break try: print(", "commence par \"0:\" cela signifie que le client souhaite deconnecter # par exemple", "telechargement de fichier # Nom_Fichier ne doit pas contenir \"..\" ou / (tentative", "= socket.socket() port = 9500 # Obtention du chemin courant (getcwd()==get current working", "userResponse.decode('utf-8')[:2] == 'OK': with open(Nom_Fichier, 'rb') as f: bytesToSend = f.read(1024) sendedData =", "Type_Commande == 0 Si la commende commence par \"0:\" cela signifie que le", "sendedData < taille: bytesToSend = f.read(1024) ConnexionAUnClient.send(bytesToSend) sendedData += len(bytesToSend) print(\"Done File\") else:", "except: # en cas d'exception (le repertoir demandee par le client est inexistant)", "client ( cd Nom_Fichier) # os.chdir renvoie une exeception si le nom du", "repertoir demandee par le client est inexistant) # transmettre le code 404 ConnexionAUnClient.send((\"404", "Nom_Fichier) # 
os.chdir renvoie une exeception si le nom du repertoir en parametre", "boucle de traitement while break elif(Type_Commande==1):# commande de telechargement de fichier # Nom_Fichier", "if( Reprtoir_racine in os.listdir(\".\")): os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501 Nope (pls, just, k?)\") else: ConnexionAUnClient.send((\"200 OK", "import re import _thread def TraitNewConnection(ConnexionAUnClient,addrclient): # Acceder au repertoire racine os.chdir(Chemin_racine) print(\"Connexion", "en parametre try: # acceder au repertoir transmis par le client ( cd", "commende commence par \"0:\" cela signifie que le client souhaite deconnecter # par", "commandes permises # transmettre le code 400 ConnexionAUnClient.send((\"400 Commande inconue = \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\")", "envoie de code 401: syntaxe incorrecte ConnexionAUnClient.send((\"401 Format de commande incorrect : \"+Commande).encode())", "la commende commence par \"0:\" cela signifie que le client souhaite deconnecter #", "repetoir courant (comme la commande \"dir\" dans Windows et \"ls\" dans Linux) #", "404 ConnexionAUnClient.send((\"404 Le repertoire \"+Nom_Fichier+\" est introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else: # si la commande", "commande recue == \"1:File.png\" signifie telecharger \"File.png\" ) # Type_Commande == 0 Si", "et \"cd\" dans Windows et Linux) # Type_Commande == 2 demander la liste", "DIRECTORY TRAVERSAL DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue # Esquiver le reste des instructions if os.path.isfile(Nom_Fichier):", "Acceder au repertoire racine os.chdir(Chemin_racine) print(\"Connexion de la machine = \", addrclient) while", "le code 400 ConnexionAUnClient.send((\"400 Commande inconue = \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: try: # envoie", "ConnexionAUnClient.close() except : pass SocketServeur = socket.socket() port = 9500 # Obtention du", "in Nom_Fichier or \"/\" in Nom_Fichier ): ConnexionAUnClient.send(b\"501 DIRECTORY TRAVERSAL DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue", "par exemple \"0:\", \"0:CYA\" et \"0:leaving\" signifient deconnexion try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") # decouper la", "9500 # Obtention du chemin courant (getcwd()==get current working directory) Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] #", "Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") # decouper la commande par le caractere \":\" Arguments=Commande.split(\":\") # le premier", "401: syntaxe incorrecte ConnexionAUnClient.send((\"401 Format de commande incorrect : \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: pass", "while break elif(Type_Commande==1):# commande de telechargement de fichier # Nom_Fichier ne doit pas", "): ConnexionAUnClient.send(b\"501 DIRECTORY TRAVERSAL DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue # Esquiver le reste des instructions", "TRAVERSAL DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue # Esquiver le reste des instructions if os.path.isfile(Nom_Fichier): taille", "OK \"+Nom_Fichier).encode()) except: # en cas d'exception (le repertoir demandee par le client", "ConnexionAUnClient.send((\"404 Le repertoire \"+Nom_Fichier+\" est introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else: # si la commande du", "par le caractere \":\" Arguments=Commande.split(\":\") # le premier argument represente le type 
de", "deconnexion try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") # decouper la commande par le caractere \":\" Arguments=Commande.split(\":\") #", "# Type_Commande == 0 Si la commende commence par \"0:\" cela signifie que", "Python 3! import socket import os import re import _thread def TraitNewConnection(ConnexionAUnClient,addrclient): #", "elif(Type_Commande==1):# commande de telechargement de fichier # Nom_Fichier ne doit pas contenir \"..\"", "len(bytesToSend) print(\"Done File\") else: ConnexionAUnClient.sendall(b\"ERR \") elif(Type_Commande==2):# commande pour lister le contenu du", "k?)\") else: ConnexionAUnClient.send((\"200 OK \"+Nom_Fichier).encode()) except: # en cas d'exception (le repertoir demandee", "demandee par le client est inexistant) # transmettre le code 404 ConnexionAUnClient.send((\"404 Le", "aucune des commandes permises # transmettre le code 400 ConnexionAUnClient.send((\"400 Commande inconue =", "2 demander la liste des fichiers dans le repetoir courant (comme la commande", "(pls, just, k?)\") else: ConnexionAUnClient.send((\"200 OK \"+Nom_Fichier).encode()) except: # en cas d'exception (le", "souhaite deconnecter # par exemple \"0:\", \"0:CYA\" et \"0:leaving\" signifient deconnexion try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\")", "ConnexionAUnClient.send(b\"200 k, bye\") # L'envoie d'une sequence \"--\\r\\n\\r\\n\" delimite la fin d'une reponse", "\"1:File.png\" signifie telecharger \"File.png\" ) # Type_Commande == 0 Si la commende commence", "incorrecte ConnexionAUnClient.send((\"401 Format de commande incorrect : \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: pass break try:", "le client ( cd Nom_Fichier) # os.chdir renvoie une exeception si le nom", "nom du repertoir en parametre (\"Nom_Fichier\") n'existe pas os.chdir(Nom_Fichier) if( Reprtoir_racine in os.listdir(\".\")):", "commande du client ne correspond a aucune des commandes permises # transmettre le", "SocketServeur = socket.socket() port = 9500 # Obtention du chemin courant (getcwd()==get current", "else: # si la commande du client ne correspond a aucune des commandes", "commande d'acces au repertoir en parametre try: # acceder au repertoir transmis par", "par le client ( cd Nom_Fichier) # os.chdir renvoie une exeception si le", "serveur\") while True: # Attente d'une connexion (accept) ConnexionAUnClient, addrclient = SocketServeur.accept() _thread.start_new_thread(TraitNewConnection,(ConnexionAUnClient,addrclient))", "# si la commande du client ne correspond a aucune des commandes permises", "try: # envoie de code 401: syntaxe incorrecte ConnexionAUnClient.send((\"401 Format de commande incorrect", "transmettre le code 404 ConnexionAUnClient.send((\"404 Le repertoire \"+Nom_Fichier+\" est introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else: #", "\"Deconnexion de :\",addrclient) ConnexionAUnClient.close() except : pass SocketServeur = socket.socket() port = 9500", "\"--\\r\\n\\r\\n\" delimite la fin d'une reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") # Sortir de la boucle de", "la commande Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1] if(Type_Commande==0):# commande de deconnexion ConnexionAUnClient.send(b\"200 k, bye\") # L'envoie", "transmis par le client ( cd Nom_Fichier) # os.chdir renvoie une exeception si", "repertoir courant # os.listdir(\".\") revoie la liste des fichier / repertoires dans le", "File\") else: ConnexionAUnClient.sendall(b\"ERR \") elif(Type_Commande==2):# commande pour lister 
le contenu du repertoir courant", "if os.path.isfile(Nom_Fichier): taille = os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode()) userResponse = ConnexionAUnClient.recv(1024) if userResponse.decode('utf-8')[:2] ==", "le client est inexistant) # transmettre le code 404 ConnexionAUnClient.send((\"404 Le repertoire \"+Nom_Fichier+\"", "TraitNewConnection(ConnexionAUnClient,addrclient): # Acceder au repertoire racine os.chdir(Chemin_racine) print(\"Connexion de la machine = \",", "# acceder au repertoir transmis par le client ( cd Nom_Fichier) # os.chdir", "# transmettre le code 400 ConnexionAUnClient.send((\"400 Commande inconue = \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: try:", "bytesToSend = f.read(1024) sendedData = len(bytesToSend) ConnexionAUnClient.send(bytesToSend) while sendedData < taille: bytesToSend =", "fichier / repertoires dans le repertoire courant (comme ls et dir) Liste_Des_Fichiers=os.listdir(\".\") ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode())", "exemple \"0:\", \"0:CYA\" et \"0:leaving\" signifient deconnexion try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") # decouper la commande", "os.chdir renvoie une exeception si le nom du repertoir en parametre (\"Nom_Fichier\") n'existe", "# Obtention du chemin courant (getcwd()==get current working directory) Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] # Ecout", "de la machine = \", addrclient) while True: # Format des commandes transmis", "la liste des fichier / repertoires dans le repertoire courant (comme ls et", "la fin d'une reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") # Sortir de la boucle de traitement while", "parametre try: # acceder au repertoir transmis par le client ( cd Nom_Fichier)", "# decouper la commande par le caractere \":\" Arguments=Commande.split(\":\") # le premier argument", "ou / (tentative de parcourir les repertoires ) if( \"..\" in Nom_Fichier or", "d'une reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") # Sortir de la boucle de traitement while break elif(Type_Commande==1):#", "import _thread def TraitNewConnection(ConnexionAUnClient,addrclient): # Acceder au repertoire racine os.chdir(Chemin_racine) print(\"Connexion de la", "if( \"..\" in Nom_Fichier or \"/\" in Nom_Fichier ): ConnexionAUnClient.send(b\"501 DIRECTORY TRAVERSAL DENIED\")", "est inexistant) # transmettre le code 404 ConnexionAUnClient.send((\"404 Le repertoire \"+Nom_Fichier+\" est introuvable\").encode())", "ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") # Sortir de la boucle de traitement while break elif(Type_Commande==1):# commande de", "len(bytesToSend) ConnexionAUnClient.send(bytesToSend) while sendedData < taille: bytesToSend = f.read(1024) ConnexionAUnClient.send(bytesToSend) sendedData += len(bytesToSend)", "du client ne correspond a aucune des commandes permises # transmettre le code", "FTP et \"cd\" dans Windows et Linux) # Type_Commande == 2 demander la", "pas contenir \"..\" ou / (tentative de parcourir les repertoires ) if( \"..\"", "Obtention du chemin courant (getcwd()==get current working directory) Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] # Ecout de", "== 'OK': with open(Nom_Fichier, 'rb') as f: bytesToSend = f.read(1024) sendedData = len(bytesToSend)", "\"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: try: # envoie de code 401: syntaxe incorrecte ConnexionAUnClient.send((\"401 Format", "f.read(1024) 
ConnexionAUnClient.send(bytesToSend) sendedData += len(bytesToSend) print(\"Done File\") else: ConnexionAUnClient.sendall(b\"ERR \") elif(Type_Commande==2):# commande pour", "Nom_Fichier or \"/\" in Nom_Fichier ): ConnexionAUnClient.send(b\"501 DIRECTORY TRAVERSAL DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue #", "de deconnexion ConnexionAUnClient.send(b\"200 k, bye\") # L'envoie d'une sequence \"--\\r\\n\\r\\n\" delimite la fin", "in Nom_Fichier ): ConnexionAUnClient.send(b\"501 DIRECTORY TRAVERSAL DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue # Esquiver le reste", "contenu du repertoir courant # os.listdir(\".\") revoie la liste des fichier / repertoires", "\", addrclient) while True: # Format des commandes transmis par le client :", "\"0:CYA\" et \"0:leaving\" signifient deconnexion try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") # decouper la commande par le", "'rb') as f: bytesToSend = f.read(1024) sendedData = len(bytesToSend) ConnexionAUnClient.send(bytesToSend) while sendedData <", "f.read(1024) sendedData = len(bytesToSend) ConnexionAUnClient.send(bytesToSend) while sendedData < taille: bytesToSend = f.read(1024) ConnexionAUnClient.send(bytesToSend)", "de traitement while break elif(Type_Commande==1):# commande de telechargement de fichier # Nom_Fichier ne", "ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode()) userResponse = ConnexionAUnClient.recv(1024) if userResponse.decode('utf-8')[:2] == 'OK': with open(Nom_Fichier, 'rb') as", "le nom du repertoir en parametre (\"Nom_Fichier\") n'existe pas os.chdir(Nom_Fichier) if( Reprtoir_racine in", "au repertoir transmis par le client ( cd Nom_Fichier) # os.chdir renvoie une", "signifie que le client souhaite deconnecter # par exemple \"0:\", \"0:CYA\" et \"0:leaving\"", "or \"/\" in Nom_Fichier ): ConnexionAUnClient.send(b\"501 DIRECTORY TRAVERSAL DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue # Esquiver", "print(\"Connexion de la machine = \", addrclient) while True: # Format des commandes", "des fichiers dans le repetoir courant (comme la commande \"dir\" dans Windows et", "commandes transmis par le client : \"Type_Commande:NomFichier\" # Type_Commande == 3 changer le", "la boucle de traitement while break elif(Type_Commande==1):# commande de telechargement de fichier #", "exemple si la commande recue == \"1:File.png\" signifie telecharger \"File.png\" ) # Type_Commande", "= \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: try: # envoie de code 401: syntaxe incorrecte ConnexionAUnClient.send((\"401", "dans Windows et \"ls\" dans Linux) # Type_Commande == 1 telecharger un fichier(", "connexion sur toutes les insterfacces (0.0.0.0) SocketServeur.bind((\"0.0.0.0\", port)) SocketServeur.listen(1) print(\"Lancement serveur\") while True:", "premier argument represente le type de la commande Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1] if(Type_Commande==0):# commande de", "les insterfacces (0.0.0.0) SocketServeur.bind((\"0.0.0.0\", port)) SocketServeur.listen(1) print(\"Lancement serveur\") while True: # Attente d'une", "par \"0:\" cela signifie que le client souhaite deconnecter # par exemple \"0:\",", "_thread def TraitNewConnection(ConnexionAUnClient,addrclient): # Acceder au repertoire racine os.chdir(Chemin_racine) print(\"Connexion de la machine", "courant (getcwd()==get current working directory) Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] # Ecout de connexion sur toutes", "instructions if 
os.path.isfile(Nom_Fichier): taille = os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode()) userResponse = ConnexionAUnClient.recv(1024) if userResponse.decode('utf-8')[:2]", "3 changer le repertoire courant (Change Working Dirrectory \"CWD\" dans FTP et \"cd\"", "argument represente le type de la commande Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1] if(Type_Commande==0):# commande de deconnexion", ": pass SocketServeur = socket.socket() port = 9500 # Obtention du chemin courant", "permises # transmettre le code 400 ConnexionAUnClient.send((\"400 Commande inconue = \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except:", "ConnexionAUnClient.recv(1024) if userResponse.decode('utf-8')[:2] == 'OK': with open(Nom_Fichier, 'rb') as f: bytesToSend = f.read(1024)", "re import _thread def TraitNewConnection(ConnexionAUnClient,addrclient): # Acceder au repertoire racine os.chdir(Chemin_racine) print(\"Connexion de", "dans le repertoire courant (comme ls et dir) Liste_Des_Fichiers=os.listdir(\".\") ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):# commande", "\"0:\", \"0:CYA\" et \"0:leaving\" signifient deconnexion try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") # decouper la commande par", "commande Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1] if(Type_Commande==0):# commande de deconnexion ConnexionAUnClient.send(b\"200 k, bye\") # L'envoie d'une", "code 401: syntaxe incorrecte ConnexionAUnClient.send((\"401 Format de commande incorrect : \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except:", "ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: pass break try: print( \"Deconnexion de :\",addrclient) ConnexionAUnClient.close() except : pass", "si la commande du client ne correspond a aucune des commandes permises #", "\"/\" in Nom_Fichier ): ConnexionAUnClient.send(b\"501 DIRECTORY TRAVERSAL DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue # Esquiver le", "type de la commande Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1] if(Type_Commande==0):# commande de deconnexion ConnexionAUnClient.send(b\"200 k, bye\")", "os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode()) userResponse = ConnexionAUnClient.recv(1024) if userResponse.decode('utf-8')[:2] == 'OK': with open(Nom_Fichier, 'rb')", "syntaxe incorrecte ConnexionAUnClient.send((\"401 Format de commande incorrect : \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: pass break", "de connexion sur toutes les insterfacces (0.0.0.0) SocketServeur.bind((\"0.0.0.0\", port)) SocketServeur.listen(1) print(\"Lancement serveur\") while", "DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue # Esquiver le reste des instructions if os.path.isfile(Nom_Fichier): taille =", "Si la commende commence par \"0:\" cela signifie que le client souhaite deconnecter", "break elif(Type_Commande==1):# commande de telechargement de fichier # Nom_Fichier ne doit pas contenir", "else: ConnexionAUnClient.send((\"200 OK \"+Nom_Fichier).encode()) except: # en cas d'exception (le repertoir demandee par", "des commandes transmis par le client : \"Type_Commande:NomFichier\" # Type_Commande == 3 changer", "transmis par le client : \"Type_Commande:NomFichier\" # Type_Commande == 3 changer le repertoire", "import os import re import _thread def 
TraitNewConnection(ConnexionAUnClient,addrclient): # Acceder au repertoire racine", "d'exception (le repertoir demandee par le client est inexistant) # transmettre le code", "ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: try: # envoie de code 401: syntaxe incorrecte ConnexionAUnClient.send((\"401 Format de", "pass SocketServeur = socket.socket() port = 9500 # Obtention du chemin courant (getcwd()==get", "Nope (pls, just, k?)\") else: ConnexionAUnClient.send((\"200 OK \"+Nom_Fichier).encode()) except: # en cas d'exception", "print(\"Done File\") else: ConnexionAUnClient.sendall(b\"ERR \") elif(Type_Commande==2):# commande pour lister le contenu du repertoir", "+= len(bytesToSend) print(\"Done File\") else: ConnexionAUnClient.sendall(b\"ERR \") elif(Type_Commande==2):# commande pour lister le contenu", "400 ConnexionAUnClient.send((\"400 Commande inconue = \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: try: # envoie de code", "repertoire courant (comme ls et dir) Liste_Des_Fichiers=os.listdir(\".\") ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):# commande d'acces au", "== 0 Si la commende commence par \"0:\" cela signifie que le client", "cela signifie que le client souhaite deconnecter # par exemple \"0:\", \"0:CYA\" et", "commande \"dir\" dans Windows et \"ls\" dans Linux) # Type_Commande == 1 telecharger", "in os.listdir(\".\")): os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501 Nope (pls, just, k?)\") else: ConnexionAUnClient.send((\"200 OK \"+Nom_Fichier).encode()) except:", "just, k?)\") else: ConnexionAUnClient.send((\"200 OK \"+Nom_Fichier).encode()) except: # en cas d'exception (le repertoir", "# Format des commandes transmis par le client : \"Type_Commande:NomFichier\" # Type_Commande ==", "try: # acceder au repertoir transmis par le client ( cd Nom_Fichier) #", "Working Dirrectory \"CWD\" dans FTP et \"cd\" dans Windows et Linux) # Type_Commande", "en parametre (\"Nom_Fichier\") n'existe pas os.chdir(Nom_Fichier) if( Reprtoir_racine in os.listdir(\".\")): os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501 Nope", "while True: # Attente d'une connexion (accept) ConnexionAUnClient, addrclient = SocketServeur.accept() _thread.start_new_thread(TraitNewConnection,(ConnexionAUnClient,addrclient)) SocketServeur.close()", "\"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: pass break try: print( \"Deconnexion de :\",addrclient) ConnexionAUnClient.close() except :", "signifie telecharger \"File.png\" ) # Type_Commande == 0 Si la commende commence par", "except: try: # envoie de code 401: syntaxe incorrecte ConnexionAUnClient.send((\"401 Format de commande", "# L'envoie d'une sequence \"--\\r\\n\\r\\n\" delimite la fin d'une reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") # Sortir", "= len(bytesToSend) ConnexionAUnClient.send(bytesToSend) while sendedData < taille: bytesToSend = f.read(1024) ConnexionAUnClient.send(bytesToSend) sendedData +=", "ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):# commande d'acces au repertoir en parametre try: # acceder au repertoir", "os.listdir(\".\")): os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501 Nope (pls, just, k?)\") else: ConnexionAUnClient.send((\"200 OK \"+Nom_Fichier).encode()) except: #", "revoie la liste des fichier / repertoires dans le repertoire courant (comme ls", "courant (comme ls et dir) Liste_Des_Fichiers=os.listdir(\".\") 
ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):# commande d'acces au repertoir", "cas d'exception (le repertoir demandee par le client est inexistant) # transmettre le", "toutes les insterfacces (0.0.0.0) SocketServeur.bind((\"0.0.0.0\", port)) SocketServeur.listen(1) print(\"Lancement serveur\") while True: # Attente", "courant # os.listdir(\".\") revoie la liste des fichier / repertoires dans le repertoire", "ConnexionAUnClient.send(b\"501 Nope (pls, just, k?)\") else: ConnexionAUnClient.send((\"200 OK \"+Nom_Fichier).encode()) except: # en cas", "\"+str(taille).encode()) userResponse = ConnexionAUnClient.recv(1024) if userResponse.decode('utf-8')[:2] == 'OK': with open(Nom_Fichier, 'rb') as f:", "si la commande recue == \"1:File.png\" signifie telecharger \"File.png\" ) # Type_Commande ==", "# Type_Commande == 3 changer le repertoire courant (Change Working Dirrectory \"CWD\" dans", "<gh_stars>1-10 # Python 3! import socket import os import re import _thread def", "client ne correspond a aucune des commandes permises # transmettre le code 400", "fichier # Nom_Fichier ne doit pas contenir \"..\" ou / (tentative de parcourir", "le repertoire courant (comme ls et dir) Liste_Des_Fichiers=os.listdir(\".\") ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):# commande d'acces", ": \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: pass break try: print( \"Deconnexion de :\",addrclient) ConnexionAUnClient.close() except", "Windows et \"ls\" dans Linux) # Type_Commande == 1 telecharger un fichier( par", "pour lister le contenu du repertoir courant # os.listdir(\".\") revoie la liste des", "\"..\" ou / (tentative de parcourir les repertoires ) if( \"..\" in Nom_Fichier", "courant (comme la commande \"dir\" dans Windows et \"ls\" dans Linux) # Type_Commande", "Nom_Fichier ne doit pas contenir \"..\" ou / (tentative de parcourir les repertoires", "repertoire courant (Change Working Dirrectory \"CWD\" dans FTP et \"cd\" dans Windows et", "demander la liste des fichiers dans le repetoir courant (comme la commande \"dir\"", "\"+Nom_Fichier).encode()) except: # en cas d'exception (le repertoir demandee par le client est", "du chemin courant (getcwd()==get current working directory) Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] # Ecout de connexion", "decouper la commande par le caractere \":\" Arguments=Commande.split(\":\") # le premier argument represente", "userResponse = ConnexionAUnClient.recv(1024) if userResponse.decode('utf-8')[:2] == 'OK': with open(Nom_Fichier, 'rb') as f: bytesToSend", "ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):# commande d'acces au repertoir en parametre try: # acceder au", "(comme la commande \"dir\" dans Windows et \"ls\" dans Linux) # Type_Commande ==", "fichiers dans le repetoir courant (comme la commande \"dir\" dans Windows et \"ls\"", "# Esquiver le reste des instructions if os.path.isfile(Nom_Fichier): taille = os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode())", "True: # Format des commandes transmis par le client : \"Type_Commande:NomFichier\" # Type_Commande", "delimite la fin d'une reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") # Sortir de la boucle de traitement", "une exeception si le nom du 
repertoir en parametre (\"Nom_Fichier\") n'existe pas os.chdir(Nom_Fichier)", "(Change Working Dirrectory \"CWD\" dans FTP et \"cd\" dans Windows et Linux) #", "Type_Commande == 2 demander la liste des fichiers dans le repetoir courant (comme", "contenir \"..\" ou / (tentative de parcourir les repertoires ) if( \"..\" in", "# Type_Commande == 2 demander la liste des fichiers dans le repetoir courant", "\"+Nom_Fichier+\" est introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else: # si la commande du client ne correspond", "sendedData += len(bytesToSend) print(\"Done File\") else: ConnexionAUnClient.sendall(b\"ERR \") elif(Type_Commande==2):# commande pour lister le", "== 1 telecharger un fichier( par exemple si la commande recue == \"1:File.png\"", "break try: print( \"Deconnexion de :\",addrclient) ConnexionAUnClient.close() except : pass SocketServeur = socket.socket()", "ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else: # si la commande du client ne correspond a aucune des", "Commande inconue = \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: try: # envoie de code 401: syntaxe", "== 2 demander la liste des fichiers dans le repetoir courant (comme la", "au repertoir en parametre try: # acceder au repertoir transmis par le client", "de commande incorrect : \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: pass break try: print( \"Deconnexion de", "Liste_Des_Fichiers=os.listdir(\".\") ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):# commande d'acces au repertoir en parametre try: # acceder", "# envoie de code 401: syntaxe incorrecte ConnexionAUnClient.send((\"401 Format de commande incorrect :", "\"dir\" dans Windows et \"ls\" dans Linux) # Type_Commande == 1 telecharger un", "par le client : \"Type_Commande:NomFichier\" # Type_Commande == 3 changer le repertoire courant", "telecharger \"File.png\" ) # Type_Commande == 0 Si la commende commence par \"0:\"", "de parcourir les repertoires ) if( \"..\" in Nom_Fichier or \"/\" in Nom_Fichier", "# Type_Commande == 1 telecharger un fichier( par exemple si la commande recue", "\"CWD\" dans FTP et \"cd\" dans Windows et Linux) # Type_Commande == 2", "d'une sequence \"--\\r\\n\\r\\n\" delimite la fin d'une reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") # Sortir de la", "de :\",addrclient) ConnexionAUnClient.close() except : pass SocketServeur = socket.socket() port = 9500 #", "sequence \"--\\r\\n\\r\\n\" delimite la fin d'une reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") # Sortir de la boucle", "except: pass break try: print( \"Deconnexion de :\",addrclient) ConnexionAUnClient.close() except : pass SocketServeur", "doit pas contenir \"..\" ou / (tentative de parcourir les repertoires ) if(", "Ecout de connexion sur toutes les insterfacces (0.0.0.0) SocketServeur.bind((\"0.0.0.0\", port)) SocketServeur.listen(1) print(\"Lancement serveur\")", "repertoir transmis par le client ( cd Nom_Fichier) # os.chdir renvoie une exeception", "\"File.png\" ) # Type_Commande == 0 Si la commende commence par \"0:\" cela", "le type de la commande Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1] if(Type_Commande==0):# commande de deconnexion ConnexionAUnClient.send(b\"200 k,", "ne correspond a aucune des commandes permises # transmettre le code 400 ConnexionAUnClient.send((\"400", "la commande par le caractere \":\" Arguments=Commande.split(\":\") # le premier argument represente le", 
"client est inexistant) # transmettre le code 404 ConnexionAUnClient.send((\"404 Le repertoire \"+Nom_Fichier+\" est", "= 9500 # Obtention du chemin courant (getcwd()==get current working directory) Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1]", "\"0:\" cela signifie que le client souhaite deconnecter # par exemple \"0:\", \"0:CYA\"", "Sortir de la boucle de traitement while break elif(Type_Commande==1):# commande de telechargement de", "(comme ls et dir) Liste_Des_Fichiers=os.listdir(\".\") ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):# commande d'acces au repertoir en", "SocketServeur.listen(1) print(\"Lancement serveur\") while True: # Attente d'une connexion (accept) ConnexionAUnClient, addrclient =", "as f: bytesToSend = f.read(1024) sendedData = len(bytesToSend) ConnexionAUnClient.send(bytesToSend) while sendedData < taille:", "liste des fichiers dans le repetoir courant (comme la commande \"dir\" dans Windows", "que le client souhaite deconnecter # par exemple \"0:\", \"0:CYA\" et \"0:leaving\" signifient", "Linux) # Type_Commande == 2 demander la liste des fichiers dans le repetoir", "Dirrectory \"CWD\" dans FTP et \"cd\" dans Windows et Linux) # Type_Commande ==", "un fichier( par exemple si la commande recue == \"1:File.png\" signifie telecharger \"File.png\"", "changer le repertoire courant (Change Working Dirrectory \"CWD\" dans FTP et \"cd\" dans", "deconnecter # par exemple \"0:\", \"0:CYA\" et \"0:leaving\" signifient deconnexion try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") #", "Nom_Fichier=Arguments[1] if(Type_Commande==0):# commande de deconnexion ConnexionAUnClient.send(b\"200 k, bye\") # L'envoie d'une sequence \"--\\r\\n\\r\\n\"", "\"ls\" dans Linux) # Type_Commande == 1 telecharger un fichier( par exemple si", "Le repertoire \"+Nom_Fichier+\" est introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else: # si la commande du client", "Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] # Ecout de connexion sur toutes les insterfacces (0.0.0.0) SocketServeur.bind((\"0.0.0.0\", port)) SocketServeur.listen(1)", "# os.chdir renvoie une exeception si le nom du repertoir en parametre (\"Nom_Fichier\")", "de la boucle de traitement while break elif(Type_Commande==1):# commande de telechargement de fichier", "= f.read(1024) ConnexionAUnClient.send(bytesToSend) sendedData += len(bytesToSend) print(\"Done File\") else: ConnexionAUnClient.sendall(b\"ERR \") elif(Type_Commande==2):# commande", "# en cas d'exception (le repertoir demandee par le client est inexistant) #", "inexistant) # transmettre le code 404 ConnexionAUnClient.send((\"404 Le repertoire \"+Nom_Fichier+\" est introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\")", "elif(Type_Commande==3):# commande d'acces au repertoir en parametre try: # acceder au repertoir transmis", "(tentative de parcourir les repertoires ) if( \"..\" in Nom_Fichier or \"/\" in", "parcourir les repertoires ) if( \"..\" in Nom_Fichier or \"/\" in Nom_Fichier ):", "du repertoir en parametre (\"Nom_Fichier\") n'existe pas os.chdir(Nom_Fichier) if( Reprtoir_racine in os.listdir(\".\")): os.chdir(Reprtoir_racine)", "ConnexionAUnClient.sendall(b\"ERR \") elif(Type_Commande==2):# commande pour lister le contenu du repertoir courant # os.listdir(\".\")", "# Acceder au repertoire racine os.chdir(Chemin_racine) print(\"Connexion de la machine = \", addrclient)", "Type_Commande == 3 changer 
le repertoire courant (Change Working Dirrectory \"CWD\" dans FTP", "ConnexionAUnClient.send(bytesToSend) while sendedData < taille: bytesToSend = f.read(1024) ConnexionAUnClient.send(bytesToSend) sendedData += len(bytesToSend) print(\"Done", "f: bytesToSend = f.read(1024) sendedData = len(bytesToSend) ConnexionAUnClient.send(bytesToSend) while sendedData < taille: bytesToSend", "fichier( par exemple si la commande recue == \"1:File.png\" signifie telecharger \"File.png\" )", "= \", addrclient) while True: # Format des commandes transmis par le client", "Esquiver le reste des instructions if os.path.isfile(Nom_Fichier): taille = os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode()) userResponse", "de telechargement de fichier # Nom_Fichier ne doit pas contenir \"..\" ou /", "traitement while break elif(Type_Commande==1):# commande de telechargement de fichier # Nom_Fichier ne doit", "et dir) Liste_Des_Fichiers=os.listdir(\".\") ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):# commande d'acces au repertoir en parametre try:", "par exemple si la commande recue == \"1:File.png\" signifie telecharger \"File.png\" ) #", "(getcwd()==get current working directory) Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] # Ecout de connexion sur toutes les", "exeception si le nom du repertoir en parametre (\"Nom_Fichier\") n'existe pas os.chdir(Nom_Fichier) if(", "reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") # Sortir de la boucle de traitement while break elif(Type_Commande==1):# commande", "os.chdir(Nom_Fichier) if( Reprtoir_racine in os.listdir(\".\")): os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501 Nope (pls, just, k?)\") else: ConnexionAUnClient.send((\"200", "et \"0:leaving\" signifient deconnexion try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") # decouper la commande par le caractere", "Windows et Linux) # Type_Commande == 2 demander la liste des fichiers dans", "elif(Type_Commande==2):# commande pour lister le contenu du repertoir courant # os.listdir(\".\") revoie la", "ne doit pas contenir \"..\" ou / (tentative de parcourir les repertoires )", "while True: # Format des commandes transmis par le client : \"Type_Commande:NomFichier\" #", ": \"Type_Commande:NomFichier\" # Type_Commande == 3 changer le repertoire courant (Change Working Dirrectory", "caractere \":\" Arguments=Commande.split(\":\") # le premier argument represente le type de la commande", "Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1] if(Type_Commande==0):# commande de deconnexion ConnexionAUnClient.send(b\"200 k, bye\") # L'envoie d'une sequence", "n'existe pas os.chdir(Nom_Fichier) if( Reprtoir_racine in os.listdir(\".\")): os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501 Nope (pls, just, k?)\")", "courant (Change Working Dirrectory \"CWD\" dans FTP et \"cd\" dans Windows et Linux)", "code 404 ConnexionAUnClient.send((\"404 Le repertoire \"+Nom_Fichier+\" est introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else: # si la", "== \"1:File.png\" signifie telecharger \"File.png\" ) # Type_Commande == 0 Si la commende", "( cd Nom_Fichier) # os.chdir renvoie une exeception si le nom du repertoir", ":\",addrclient) ConnexionAUnClient.close() except : pass SocketServeur = socket.socket() port = 9500 # Obtention", "sendedData = len(bytesToSend) ConnexionAUnClient.send(bytesToSend) while sendedData < taille: bytesToSend = 
f.read(1024) ConnexionAUnClient.send(bytesToSend) sendedData", "si le nom du repertoir en parametre (\"Nom_Fichier\") n'existe pas os.chdir(Nom_Fichier) if( Reprtoir_racine", "# os.listdir(\".\") revoie la liste des fichier / repertoires dans le repertoire courant", "recue == \"1:File.png\" signifie telecharger \"File.png\" ) # Type_Commande == 0 Si la", "current working directory) Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] # Ecout de connexion sur toutes les insterfacces", "else: ConnexionAUnClient.sendall(b\"ERR \") elif(Type_Commande==2):# commande pour lister le contenu du repertoir courant #", "# par exemple \"0:\", \"0:CYA\" et \"0:leaving\" signifient deconnexion try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") # decouper", "la commande recue == \"1:File.png\" signifie telecharger \"File.png\" ) # Type_Commande == 0", "repertoire \"+Nom_Fichier+\" est introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else: # si la commande du client ne", "try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") # decouper la commande par le caractere \":\" Arguments=Commande.split(\":\") # le", "insterfacces (0.0.0.0) SocketServeur.bind((\"0.0.0.0\", port)) SocketServeur.listen(1) print(\"Lancement serveur\") while True: # Attente d'une connexion", "le repetoir courant (comme la commande \"dir\" dans Windows et \"ls\" dans Linux)", "== 3 changer le repertoire courant (Change Working Dirrectory \"CWD\" dans FTP et", "le client : \"Type_Commande:NomFichier\" # Type_Commande == 3 changer le repertoire courant (Change", "os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501 Nope (pls, just, k?)\") else: ConnexionAUnClient.send((\"200 OK \"+Nom_Fichier).encode()) except: # en", "addrclient) while True: # Format des commandes transmis par le client : \"Type_Commande:NomFichier\"", "if(Type_Commande==0):# commande de deconnexion ConnexionAUnClient.send(b\"200 k, bye\") # L'envoie d'une sequence \"--\\r\\n\\r\\n\" delimite", "des instructions if os.path.isfile(Nom_Fichier): taille = os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode()) userResponse = ConnexionAUnClient.recv(1024) if", "ConnexionAUnClient.send(b\"501 DIRECTORY TRAVERSAL DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue # Esquiver le reste des instructions if", "incorrect : \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: pass break try: print( \"Deconnexion de :\",addrclient) ConnexionAUnClient.close()", "liste des fichier / repertoires dans le repertoire courant (comme ls et dir)", "ConnexionAUnClient.send(bytesToSend) sendedData += len(bytesToSend) print(\"Done File\") else: ConnexionAUnClient.sendall(b\"ERR \") elif(Type_Commande==2):# commande pour lister", "directory) Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] # Ecout de connexion sur toutes les insterfacces (0.0.0.0) SocketServeur.bind((\"0.0.0.0\",", "port)) SocketServeur.listen(1) print(\"Lancement serveur\") while True: # Attente d'une connexion (accept) ConnexionAUnClient, addrclient", ") if( \"..\" in Nom_Fichier or \"/\" in Nom_Fichier ): ConnexionAUnClient.send(b\"501 DIRECTORY TRAVERSAL", "la machine = \", addrclient) while True: # Format des commandes transmis par", "la commande du client ne correspond a aucune des commandes permises # transmettre", "os.chdir(Chemin_racine) print(\"Connexion de la machine = \", addrclient) while True: # Format des", "dans Linux) # Type_Commande == 1 telecharger un 
fichier( par exemple si la", "reste des instructions if os.path.isfile(Nom_Fichier): taille = os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode()) userResponse = ConnexionAUnClient.recv(1024)", "au repertoire racine os.chdir(Chemin_racine) print(\"Connexion de la machine = \", addrclient) while True:", "signifient deconnexion try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") # decouper la commande par le caractere \":\" Arguments=Commande.split(\":\")", "# transmettre le code 404 ConnexionAUnClient.send((\"404 Le repertoire \"+Nom_Fichier+\" est introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else:", "= ConnexionAUnClient.recv(1024) if userResponse.decode('utf-8')[:2] == 'OK': with open(Nom_Fichier, 'rb') as f: bytesToSend =", "(0.0.0.0) SocketServeur.bind((\"0.0.0.0\", port)) SocketServeur.listen(1) print(\"Lancement serveur\") while True: # Attente d'une connexion (accept)", "print( \"Deconnexion de :\",addrclient) ConnexionAUnClient.close() except : pass SocketServeur = socket.socket() port =", "le client souhaite deconnecter # par exemple \"0:\", \"0:CYA\" et \"0:leaving\" signifient deconnexion", "\") elif(Type_Commande==2):# commande pour lister le contenu du repertoir courant # os.listdir(\".\") revoie", "repertoir en parametre (\"Nom_Fichier\") n'existe pas os.chdir(Nom_Fichier) if( Reprtoir_racine in os.listdir(\".\")): os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501", "3! import socket import os import re import _thread def TraitNewConnection(ConnexionAUnClient,addrclient): # Acceder", "des commandes permises # transmettre le code 400 ConnexionAUnClient.send((\"400 Commande inconue = \"+Commande).encode())", "le code 404 ConnexionAUnClient.send((\"404 Le repertoire \"+Nom_Fichier+\" est introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else: # si", "import socket import os import re import _thread def TraitNewConnection(ConnexionAUnClient,addrclient): # Acceder au", "taille = os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode()) userResponse = ConnexionAUnClient.recv(1024) if userResponse.decode('utf-8')[:2] == 'OK': with", "Format de commande incorrect : \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: pass break try: print( \"Deconnexion", "Nom_Fichier ): ConnexionAUnClient.send(b\"501 DIRECTORY TRAVERSAL DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue # Esquiver le reste des", ") # Type_Commande == 0 Si la commende commence par \"0:\" cela signifie", "correspond a aucune des commandes permises # transmettre le code 400 ConnexionAUnClient.send((\"400 Commande", "Reprtoir_racine in os.listdir(\".\")): os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501 Nope (pls, just, k?)\") else: ConnexionAUnClient.send((\"200 OK \"+Nom_Fichier).encode())", "bye\") # L'envoie d'une sequence \"--\\r\\n\\r\\n\" delimite la fin d'une reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") #", "pas os.chdir(Nom_Fichier) if( Reprtoir_racine in os.listdir(\".\")): os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501 Nope (pls, just, k?)\") else:", "le contenu du repertoir courant # os.listdir(\".\") revoie la liste des fichier /", "le premier argument represente le type de la commande Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1] if(Type_Commande==0):# commande", "represente le type de la commande Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1] if(Type_Commande==0):# commande de deconnexion 
ConnexionAUnClient.send(b\"200", "en cas d'exception (le repertoir demandee par le client est inexistant) # transmettre", "cd Nom_Fichier) # os.chdir renvoie une exeception si le nom du repertoir en", "le reste des instructions if os.path.isfile(Nom_Fichier): taille = os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode()) userResponse =", "\"cd\" dans Windows et Linux) # Type_Commande == 2 demander la liste des", "parametre (\"Nom_Fichier\") n'existe pas os.chdir(Nom_Fichier) if( Reprtoir_racine in os.listdir(\".\")): os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501 Nope (pls,", "print(\"Lancement serveur\") while True: # Attente d'une connexion (accept) ConnexionAUnClient, addrclient = SocketServeur.accept()", "dans FTP et \"cd\" dans Windows et Linux) # Type_Commande == 2 demander", "fin d'une reponse ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") # Sortir de la boucle de traitement while break", "ls et dir) Liste_Des_Fichiers=os.listdir(\".\") ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):# commande d'acces au repertoir en parametre", "des fichier / repertoires dans le repertoire courant (comme ls et dir) Liste_Des_Fichiers=os.listdir(\".\")", "bytesToSend = f.read(1024) ConnexionAUnClient.send(bytesToSend) sendedData += len(bytesToSend) print(\"Done File\") else: ConnexionAUnClient.sendall(b\"ERR \") elif(Type_Commande==2):#", "Type_Commande == 1 telecharger un fichier( par exemple si la commande recue ==", "deconnexion ConnexionAUnClient.send(b\"200 k, bye\") # L'envoie d'une sequence \"--\\r\\n\\r\\n\" delimite la fin d'une", "while sendedData < taille: bytesToSend = f.read(1024) ConnexionAUnClient.send(bytesToSend) sendedData += len(bytesToSend) print(\"Done File\")", "continue # Esquiver le reste des instructions if os.path.isfile(Nom_Fichier): taille = os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS", "os.path.isfile(Nom_Fichier): taille = os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode()) userResponse = ConnexionAUnClient.recv(1024) if userResponse.decode('utf-8')[:2] == 'OK':", "def TraitNewConnection(ConnexionAUnClient,addrclient): # Acceder au repertoire racine os.chdir(Chemin_racine) print(\"Connexion de la machine =", "(\"Nom_Fichier\") n'existe pas os.chdir(Nom_Fichier) if( Reprtoir_racine in os.listdir(\".\")): os.chdir(Reprtoir_racine) ConnexionAUnClient.send(b\"501 Nope (pls, just,", "inconue = \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: try: # envoie de code 401: syntaxe incorrecte", "commande incorrect : \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: pass break try: print( \"Deconnexion de :\",addrclient)", "repertoires ) if( \"..\" in Nom_Fichier or \"/\" in Nom_Fichier ): ConnexionAUnClient.send(b\"501 DIRECTORY", "machine = \", addrclient) while True: # Format des commandes transmis par le", "socket import os import re import _thread def TraitNewConnection(ConnexionAUnClient,addrclient): # Acceder au repertoire", "la commande \"dir\" dans Windows et \"ls\" dans Linux) # Type_Commande == 1", "les repertoires ) if( \"..\" in Nom_Fichier or \"/\" in Nom_Fichier ): ConnexionAUnClient.send(b\"501", "Arguments=Commande.split(\":\") # le premier argument represente le type de la commande Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1]", "de code 401: syntaxe incorrecte ConnexionAUnClient.send((\"401 Format de 
commande incorrect : \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\")", "commande pour lister le contenu du repertoir courant # os.listdir(\".\") revoie la liste", "sur toutes les insterfacces (0.0.0.0) SocketServeur.bind((\"0.0.0.0\", port)) SocketServeur.listen(1) print(\"Lancement serveur\") while True: #", "et Linux) # Type_Commande == 2 demander la liste des fichiers dans le", "0 Si la commende commence par \"0:\" cela signifie que le client souhaite", "port = 9500 # Obtention du chemin courant (getcwd()==get current working directory) Chemin_racine=os.getcwd()", "ConnexionAUnClient.send((\"200 OK \"+Nom_Fichier).encode()) except: # en cas d'exception (le repertoir demandee par le", "pass break try: print( \"Deconnexion de :\",addrclient) ConnexionAUnClient.close() except : pass SocketServeur =", "dans le repetoir courant (comme la commande \"dir\" dans Windows et \"ls\" dans", "= os.path.getsize(Nom_Fichier) ConnexionAUnClient.sendall(b\"EXISTS \"+str(taille).encode()) userResponse = ConnexionAUnClient.recv(1024) if userResponse.decode('utf-8')[:2] == 'OK': with open(Nom_Fichier,", "SocketServeur.bind((\"0.0.0.0\", port)) SocketServeur.listen(1) print(\"Lancement serveur\") while True: # Attente d'une connexion (accept) ConnexionAUnClient,", "# Nom_Fichier ne doit pas contenir \"..\" ou / (tentative de parcourir les", "1 telecharger un fichier( par exemple si la commande recue == \"1:File.png\" signifie", "\"..\" in Nom_Fichier or \"/\" in Nom_Fichier ): ConnexionAUnClient.send(b\"501 DIRECTORY TRAVERSAL DENIED\") ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\")", "le repertoire courant (Change Working Dirrectory \"CWD\" dans FTP et \"cd\" dans Windows", "renvoie une exeception si le nom du repertoir en parametre (\"Nom_Fichier\") n'existe pas", "# le premier argument represente le type de la commande Type_Commande=int(Arguments[0]) Nom_Fichier=Arguments[1] if(Type_Commande==0):#", "transmettre le code 400 ConnexionAUnClient.send((\"400 Commande inconue = \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: try: #", "taille: bytesToSend = f.read(1024) ConnexionAUnClient.send(bytesToSend) sendedData += len(bytesToSend) print(\"Done File\") else: ConnexionAUnClient.sendall(b\"ERR \")", "working directory) Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] # Ecout de connexion sur toutes les insterfacces (0.0.0.0)", "dir) Liste_Des_Fichiers=os.listdir(\".\") ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):# commande d'acces au repertoir en parametre try: #", "racine os.chdir(Chemin_racine) print(\"Connexion de la machine = \", addrclient) while True: # Format", "ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") continue # Esquiver le reste des instructions if os.path.isfile(Nom_Fichier): taille = os.path.getsize(Nom_Fichier)", "telecharger un fichier( par exemple si la commande recue == \"1:File.png\" signifie telecharger", "try: print( \"Deconnexion de :\",addrclient) ConnexionAUnClient.close() except : pass SocketServeur = socket.socket() port", "la liste des fichiers dans le repetoir courant (comme la commande \"dir\" dans", "Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] # Ecout de connexion sur toutes les insterfacces (0.0.0.0) SocketServeur.bind((\"0.0.0.0\", port))", "commande de telechargement de fichier # Nom_Fichier ne doit pas contenir \"..\" ou", "le caractere \":\" Arguments=Commande.split(\":\") # le 
premier argument represente le type de la", "# Sortir de la boucle de traitement while break elif(Type_Commande==1):# commande de telechargement", "(le repertoir demandee par le client est inexistant) # transmettre le code 404", "socket.socket() port = 9500 # Obtention du chemin courant (getcwd()==get current working directory)", "lister le contenu du repertoir courant # os.listdir(\".\") revoie la liste des fichier", "\"Type_Commande:NomFichier\" # Type_Commande == 3 changer le repertoire courant (Change Working Dirrectory \"CWD\"", "et \"ls\" dans Linux) # Type_Commande == 1 telecharger un fichier( par exemple", "\":\" Arguments=Commande.split(\":\") # le premier argument represente le type de la commande Type_Commande=int(Arguments[0])", "introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else: # si la commande du client ne correspond a aucune", "du repertoir courant # os.listdir(\".\") revoie la liste des fichier / repertoires dans", "ConnexionAUnClient.send((\"400 Commande inconue = \"+Commande).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") except: try: # envoie de code 401:", "os import re import _thread def TraitNewConnection(ConnexionAUnClient,addrclient): # Acceder au repertoire racine os.chdir(Chemin_racine)", "open(Nom_Fichier, 'rb') as f: bytesToSend = f.read(1024) sendedData = len(bytesToSend) ConnexionAUnClient.send(bytesToSend) while sendedData", "/ repertoires dans le repertoire courant (comme ls et dir) Liste_Des_Fichiers=os.listdir(\".\") ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\")", "except : pass SocketServeur = socket.socket() port = 9500 # Obtention du chemin", "repertoires dans le repertoire courant (comme ls et dir) Liste_Des_Fichiers=os.listdir(\".\") ConnexionAUnClient.send((\"\\n\".join(Liste_Des_Fichiers)).encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") elif(Type_Commande==3):#", "Linux) # Type_Commande == 1 telecharger un fichier( par exemple si la commande", "\"0:leaving\" signifient deconnexion try: Commande=ConnexionAUnClient.recv(1024).decode(\"utf-8\") # decouper la commande par le caractere \":\"", "dans Windows et Linux) # Type_Commande == 2 demander la liste des fichiers", "# Ecout de connexion sur toutes les insterfacces (0.0.0.0) SocketServeur.bind((\"0.0.0.0\", port)) SocketServeur.listen(1) print(\"Lancement", "d'acces au repertoir en parametre try: # acceder au repertoir transmis par le", "Format des commandes transmis par le client : \"Type_Commande:NomFichier\" # Type_Commande == 3", "commande par le caractere \":\" Arguments=Commande.split(\":\") # le premier argument represente le type", "est introuvable\").encode()) ConnexionAUnClient.send(b\"--\\r\\n\\r\\n\") else: # si la commande du client ne correspond a", "chemin courant (getcwd()==get current working directory) Chemin_racine=os.getcwd() Reprtoir_racine=os.getcwd().split(\"\\\\\")[-1] # Ecout de connexion sur" ]
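To exercise the "Type_Commande:argument" protocol above, here is a minimal client sketch. It assumes the server is running on 127.0.0.1:9500; the send_command helper is hypothetical and simply collects output until the "--\r\n\r\n" end-of-response marker or a short timeout (not every server branch emits the marker).

import socket

def send_command(sock, command):
    # Send one "Type:argument" command and collect whatever arrives before a
    # short timeout; most responses end with the "--\r\n\r\n" marker.
    sock.send(command.encode("utf-8"))
    parts = []
    try:
        while b"--\r\n\r\n" not in b"".join(parts):
            chunk = sock.recv(1024)
            if not chunk:
                break
            parts.append(chunk)
    except socket.timeout:
        pass  # branch without an end marker: return what we have so far
    return b"".join(parts).decode("utf-8", errors="replace")

sock = socket.create_connection(("127.0.0.1", 9500), timeout=5)
sock.settimeout(1.0)
print(send_command(sock, "2:."))        # list the current directory
print(send_command(sock, "0:leaving"))  # polite disconnect
sock.close()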
[ "qrcode.image.svg.SvgImage img = qrcode.make(data, image_factory=factory) io = BytesIO() img.save(io) return io.getvalue().decode(\"utf-8\") def get_timetable(idxes,", "\".join([lesson['building'], lesson.get('auditorium', '')]))) return ev def tt_to_ical(tts): cal = Calendar() for tt in", "'GET': return render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"]) page_url = request.form.get('url') try: idxes = page_to_idxes(page_url) except MyError", "dt_to_Ymd(now + delta) return get_timetable(idxes, fromdate, todate) def lesson_to_event(lesson): ev = Event() date", "%H:%M\" begin_dt = datetime.datetime.strptime(date + \" \" + begin, fmt) end_dt = datetime.datetime.strptime(date", "vText import qrcode import qrcode.image.svg from io import BytesIO app = Flask(__name__) app.config[\"APPLICATION_ROOT\"]", "= page_to_idxes(page_url) except MyError as err: return render_template(\"form.html\", url=page_url, error=str(err)) url = url_for(\"ics\",", "return response def page_to_idxes(url): m = re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url) if not m: raise", "page = requests.get(url_tt) out = [m.group(1) for m in re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)] if not", "datetime.datetime.strptime(date + \" \" + end, fmt) ev.add(\"dtstart\", begin_dt) ev.add(\"dtend\", end_dt) ev.add(\"summary\", lesson['discipline'])", "MyError(Exception): pass @app.route('/', methods=['GET', 'POST']) def hello_world(): if request.method == 'GET': return render_template(\"form.html\",", "end_dt) ev.add(\"summary\", lesson['discipline']) ev.add(\"location\", vText(\", \".join([lesson['building'], lesson.get('auditorium', '')]))) return ev def tt_to_ical(tts): cal", "Calendar, Event, vText import qrcode import qrcode.image.svg from io import BytesIO app =", "professor personal page\".format( url=url )) url_tt = m.group(0) + \"/timetable\" page = requests.get(url_tt)", "ev.add(\"dtstart\", begin_dt) ev.add(\"dtend\", end_dt) ev.add(\"summary\", lesson['discipline']) ev.add(\"location\", vText(\", \".join([lesson['building'], lesson.get('auditorium', '')]))) return ev", "redirect, make_response) import requests import re import datetime from icalendar import Calendar, Event,", "response.headers[\"Content-Disposition\"] = (\"attachment; \" \"filename=calendar.ics\") response.headers[\"Content-Type\"] = \"text/calendar; charset=utf-8\" return response def page_to_idxes(url):", "as err: return render_template(\"form.html\", url=page_url, error=str(err)) url = url_for(\"ics\", idxes=\"_\".join(idxes), _external=True) return render_template(\"form.html\",", "page.text)] if not out: raise MyError(\"idx not found on page {url_tt}\".format( url_tt=url_tt ))", "import datetime from icalendar import Calendar, Event, vText import qrcode import qrcode.image.svg from", "app = Flask(__name__) app.config[\"APPLICATION_ROOT\"] = \"/ttics/\" app.config['SERVER_NAME'] = 'math-info.hse.ru' app.debug=True class MyError(Exception): pass", "BytesIO() img.save(io) return io.getvalue().decode(\"utf-8\") def get_timetable(idxes, fromdate, todate): entrypoint = \"https://www.hse.ru/api/timetable/lessons\" out =", "url_for, g, request, jsonify, redirect, make_response) import requests import re import datetime from", "= url_for(\"ics\", idxes=\"_\".join(idxes), _external=True) return render_template(\"form.html\", url=page_url, dest=url, qr=qr(url)) @app.route('/<string:idxes>/cal.ics') def ics(idxes): tts", "def get_current_timetable(idxes, weeks=10): now = datetime.datetime.now() delta = 
datetime.timedelta(weeks=weeks) fromdate = dt_to_Ymd(now -", "out def qr(data): factory = qrcode.image.svg.SvgImage img = qrcode.make(data, image_factory=factory) io = BytesIO()", "\"filename=calendar.ics\") response.headers[\"Content-Type\"] = \"text/calendar; charset=utf-8\" return response def page_to_idxes(url): m = re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\",", "@app.route('/', methods=['GET', 'POST']) def hello_world(): if request.method == 'GET': return render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"]) page_url", "delta) todate = dt_to_Ymd(now + delta) return get_timetable(idxes, fromdate, todate) def lesson_to_event(lesson): ev", "def hello_world(): if request.method == 'GET': return render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"]) page_url = request.form.get('url') try:", "page_to_idxes(page_url) except MyError as err: return render_template(\"form.html\", url=page_url, error=str(err)) url = url_for(\"ics\", idxes=\"_\".join(idxes),", "= dt_to_Ymd(now - delta) todate = dt_to_Ymd(now + delta) return get_timetable(idxes, fromdate, todate)", "raise MyError(\"idx not found on page {url_tt}\".format( url_tt=url_tt )) return out def qr(data):", "= request.form.get('url') try: idxes = page_to_idxes(page_url) except MyError as err: return render_template(\"form.html\", url=page_url,", "ev = Event() date = lesson['date'] begin = lesson['beginLesson'] end = lesson['endLesson'] fmt", "tt in tts: for lesson in tt['Lessons']: cal.add_component(lesson_to_event(lesson)) return cal if __name__ ==", "look like HSE professor personal page\".format( url=url )) url_tt = m.group(0) + \"/timetable\"", "out = [m.group(1) for m in re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)] if not out: raise MyError(\"idx", "jsonify, redirect, make_response) import requests import re import datetime from icalendar import Calendar,", "out def dt_to_Ymd(dt): return dt.strftime(\"%Y.%m.%d\") def get_current_timetable(idxes, weeks=10): now = datetime.datetime.now() delta =", "vText(\", \".join([lesson['building'], lesson.get('auditorium', '')]))) return ev def tt_to_ical(tts): cal = Calendar() for tt", "render_template(\"form.html\", url=page_url, error=str(err)) url = url_for(\"ics\", idxes=\"_\".join(idxes), _external=True) return render_template(\"form.html\", url=page_url, dest=url, qr=qr(url))", "todate = dt_to_Ymd(now + delta) return get_timetable(idxes, fromdate, todate) def lesson_to_event(lesson): ev =", "Flask(__name__) app.config[\"APPLICATION_ROOT\"] = \"/ttics/\" app.config['SERVER_NAME'] = 'math-info.hse.ru' app.debug=True class MyError(Exception): pass @app.route('/', methods=['GET',", "return out def qr(data): factory = qrcode.image.svg.SvgImage img = qrcode.make(data, image_factory=factory) io =", "receiverType='1')).json()) return out def dt_to_Ymd(dt): return dt.strftime(\"%Y.%m.%d\") def get_current_timetable(idxes, weeks=10): now = datetime.datetime.now()", "error=str(err)) url = url_for(\"ics\", idxes=\"_\".join(idxes), _external=True) return render_template(\"form.html\", url=page_url, dest=url, qr=qr(url)) @app.route('/<string:idxes>/cal.ics') def", "= qrcode.make(data, image_factory=factory) io = BytesIO() img.save(io) return io.getvalue().decode(\"utf-8\") def get_timetable(idxes, fromdate, todate):", "request.method == 'GET': return render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"]) page_url = request.form.get('url') try: idxes = page_to_idxes(page_url)", "begin = lesson['beginLesson'] end = 
lesson['endLesson'] fmt = \"%Y.%m.%d %H:%M\" begin_dt = datetime.datetime.strptime(date", "== 'GET': return render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"]) page_url = request.form.get('url') try: idxes = page_to_idxes(page_url) except", "fmt = \"%Y.%m.%d %H:%M\" begin_dt = datetime.datetime.strptime(date + \" \" + begin, fmt)", "+ begin, fmt) end_dt = datetime.datetime.strptime(date + \" \" + end, fmt) ev.add(\"dtstart\",", "ev.add(\"location\", vText(\", \".join([lesson['building'], lesson.get('auditorium', '')]))) return ev def tt_to_ical(tts): cal = Calendar() for", "<filename>ttics.py from flask import (Flask, render_template, abort, send_from_directory, url_for, g, request, jsonify, redirect,", "page\".format( url=url )) url_tt = m.group(0) + \"/timetable\" page = requests.get(url_tt) out =", "= [m.group(1) for m in re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)] if not out: raise MyError(\"idx not", "fromdate, todate) def lesson_to_event(lesson): ev = Event() date = lesson['date'] begin = lesson['beginLesson']", "= 'math-info.hse.ru' app.debug=True class MyError(Exception): pass @app.route('/', methods=['GET', 'POST']) def hello_world(): if request.method", "idxes = page_to_idxes(page_url) except MyError as err: return render_template(\"form.html\", url=page_url, error=str(err)) url =", "if not out: raise MyError(\"idx not found on page {url_tt}\".format( url_tt=url_tt )) return", "= lesson['beginLesson'] end = lesson['endLesson'] fmt = \"%Y.%m.%d %H:%M\" begin_dt = datetime.datetime.strptime(date +", "fmt) end_dt = datetime.datetime.strptime(date + \" \" + end, fmt) ev.add(\"dtstart\", begin_dt) ev.add(\"dtend\",", "= datetime.timedelta(weeks=weeks) fromdate = dt_to_Ymd(now - delta) todate = dt_to_Ymd(now + delta) return", "dt_to_Ymd(dt): return dt.strftime(\"%Y.%m.%d\") def get_current_timetable(idxes, weeks=10): now = datetime.datetime.now() delta = datetime.timedelta(weeks=weeks) fromdate", "cal = Calendar() for tt in tts: for lesson in tt['Lessons']: cal.add_component(lesson_to_event(lesson)) return", "response def page_to_idxes(url): m = re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url) if not m: raise MyError(", "re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)] if not out: raise MyError(\"idx not found on page {url_tt}\".format( url_tt=url_tt", "\" + end, fmt) ev.add(\"dtstart\", begin_dt) ev.add(\"dtend\", end_dt) ev.add(\"summary\", lesson['discipline']) ev.add(\"location\", vText(\", \".join([lesson['building'],", "if request.method == 'GET': return render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"]) page_url = request.form.get('url') try: idxes =", "ev def tt_to_ical(tts): cal = Calendar() for tt in tts: for lesson in", "app.config['SERVER_NAME'] = 'math-info.hse.ru' app.debug=True class MyError(Exception): pass @app.route('/', methods=['GET', 'POST']) def hello_world(): if", "return out def dt_to_Ymd(dt): return dt.strftime(\"%Y.%m.%d\") def get_current_timetable(idxes, weeks=10): now = datetime.datetime.now() delta", "lesson['endLesson'] fmt = \"%Y.%m.%d %H:%M\" begin_dt = datetime.datetime.strptime(date + \" \" + begin,", "\"%Y.%m.%d %H:%M\" begin_dt = datetime.datetime.strptime(date + \" \" + begin, fmt) end_dt =", "= \"%Y.%m.%d %H:%M\" begin_dt = datetime.datetime.strptime(date + \" \" + begin, fmt) end_dt", "weeks=10): now = datetime.datetime.now() delta = datetime.timedelta(weeks=weeks) fromdate = dt_to_Ymd(now - delta) todate", "MyError as err: return render_template(\"form.html\", 
url=page_url, error=str(err)) url = url_for(\"ics\", idxes=\"_\".join(idxes), _external=True) return", "= datetime.datetime.now() delta = datetime.timedelta(weeks=weeks) fromdate = dt_to_Ymd(now - delta) todate = dt_to_Ymd(now", "idxes.split(\"_\"): out.append(requests.get(entrypoint, params=dict(fromdate=fromdate, todate=todate, lectureroid=idx, receiverType='1')).json()) return out def dt_to_Ymd(dt): return dt.strftime(\"%Y.%m.%d\") def", "request, jsonify, redirect, make_response) import requests import re import datetime from icalendar import", "url=url )) url_tt = m.group(0) + \"/timetable\" page = requests.get(url_tt) out = [m.group(1)", "+ \" \" + begin, fmt) end_dt = datetime.datetime.strptime(date + \" \" +", "[m.group(1) for m in re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)] if not out: raise MyError(\"idx not found", "def ics(idxes): tts = get_current_timetable(idxes) cal = tt_to_ical(tts) response = make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"] =", "m in re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)] if not out: raise MyError(\"idx not found on page", "idx in idxes.split(\"_\"): out.append(requests.get(entrypoint, params=dict(fromdate=fromdate, todate=todate, lectureroid=idx, receiverType='1')).json()) return out def dt_to_Ymd(dt): return", "BytesIO app = Flask(__name__) app.config[\"APPLICATION_ROOT\"] = \"/ttics/\" app.config['SERVER_NAME'] = 'math-info.hse.ru' app.debug=True class MyError(Exception):", "lesson.get('auditorium', '')]))) return ev def tt_to_ical(tts): cal = Calendar() for tt in tts:", "import qrcode import qrcode.image.svg from io import BytesIO app = Flask(__name__) app.config[\"APPLICATION_ROOT\"] =", "\"https://www.hse.ru/api/timetable/lessons\" out = [] for idx in idxes.split(\"_\"): out.append(requests.get(entrypoint, params=dict(fromdate=fromdate, todate=todate, lectureroid=idx, receiverType='1')).json())", "delta = datetime.timedelta(weeks=weeks) fromdate = dt_to_Ymd(now - delta) todate = dt_to_Ymd(now + delta)", "qrcode import qrcode.image.svg from io import BytesIO app = Flask(__name__) app.config[\"APPLICATION_ROOT\"] = \"/ttics/\"", "tt_to_ical(tts) response = make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"] = (\"attachment; \" \"filename=calendar.ics\") response.headers[\"Content-Type\"] = \"text/calendar; charset=utf-8\"", "get_timetable(idxes, fromdate, todate): entrypoint = \"https://www.hse.ru/api/timetable/lessons\" out = [] for idx in idxes.split(\"_\"):", "entrypoint = \"https://www.hse.ru/api/timetable/lessons\" out = [] for idx in idxes.split(\"_\"): out.append(requests.get(entrypoint, params=dict(fromdate=fromdate, todate=todate,", "image_factory=factory) io = BytesIO() img.save(io) return io.getvalue().decode(\"utf-8\") def get_timetable(idxes, fromdate, todate): entrypoint =", "requests import re import datetime from icalendar import Calendar, Event, vText import qrcode", "= Calendar() for tt in tts: for lesson in tt['Lessons']: cal.add_component(lesson_to_event(lesson)) return cal", "qrcode.make(data, image_factory=factory) io = BytesIO() img.save(io) return io.getvalue().decode(\"utf-8\") def get_timetable(idxes, fromdate, todate): entrypoint", "= \"text/calendar; charset=utf-8\" return response def page_to_idxes(url): m = re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url) if", "from flask import (Flask, render_template, abort, send_from_directory, url_for, g, request, jsonify, redirect, make_response)", 
"request.form.get('url') try: idxes = page_to_idxes(page_url) except MyError as err: return render_template(\"form.html\", url=page_url, error=str(err))", "try: idxes = page_to_idxes(page_url) except MyError as err: return render_template(\"form.html\", url=page_url, error=str(err)) url", "= m.group(0) + \"/timetable\" page = requests.get(url_tt) out = [m.group(1) for m in", "img.save(io) return io.getvalue().decode(\"utf-8\") def get_timetable(idxes, fromdate, todate): entrypoint = \"https://www.hse.ru/api/timetable/lessons\" out = []", "response = make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"] = (\"attachment; \" \"filename=calendar.ics\") response.headers[\"Content-Type\"] = \"text/calendar; charset=utf-8\" return", "def dt_to_Ymd(dt): return dt.strftime(\"%Y.%m.%d\") def get_current_timetable(idxes, weeks=10): now = datetime.datetime.now() delta = datetime.timedelta(weeks=weeks)", "return ev def tt_to_ical(tts): cal = Calendar() for tt in tts: for lesson", "r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url) if not m: raise MyError( \"{url} doesn't look like HSE professor", "- delta) todate = dt_to_Ymd(now + delta) return get_timetable(idxes, fromdate, todate) def lesson_to_event(lesson):", "for tt in tts: for lesson in tt['Lessons']: cal.add_component(lesson_to_event(lesson)) return cal if __name__", "= Event() date = lesson['date'] begin = lesson['beginLesson'] end = lesson['endLesson'] fmt =", "+ \"/timetable\" page = requests.get(url_tt) out = [m.group(1) for m in re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)]", "found on page {url_tt}\".format( url_tt=url_tt )) return out def qr(data): factory = qrcode.image.svg.SvgImage", "def get_timetable(idxes, fromdate, todate): entrypoint = \"https://www.hse.ru/api/timetable/lessons\" out = [] for idx in", "+ end, fmt) ev.add(\"dtstart\", begin_dt) ev.add(\"dtend\", end_dt) ev.add(\"summary\", lesson['discipline']) ev.add(\"location\", vText(\", \".join([lesson['building'], lesson.get('auditorium',", "re import datetime from icalendar import Calendar, Event, vText import qrcode import qrcode.image.svg", "for m in re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)] if not out: raise MyError(\"idx not found on", "io.getvalue().decode(\"utf-8\") def get_timetable(idxes, fromdate, todate): entrypoint = \"https://www.hse.ru/api/timetable/lessons\" out = [] for idx", "MyError( \"{url} doesn't look like HSE professor personal page\".format( url=url )) url_tt =", "= \"https://www.hse.ru/api/timetable/lessons\" out = [] for idx in idxes.split(\"_\"): out.append(requests.get(entrypoint, params=dict(fromdate=fromdate, todate=todate, lectureroid=idx,", "\"/ttics/\" app.config['SERVER_NAME'] = 'math-info.hse.ru' app.debug=True class MyError(Exception): pass @app.route('/', methods=['GET', 'POST']) def hello_world():", "raise MyError( \"{url} doesn't look like HSE professor personal page\".format( url=url )) url_tt", "+ delta) return get_timetable(idxes, fromdate, todate) def lesson_to_event(lesson): ev = Event() date =", "render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"]) page_url = request.form.get('url') try: idxes = page_to_idxes(page_url) except MyError as err:", "end, fmt) ev.add(\"dtstart\", begin_dt) ev.add(\"dtend\", end_dt) ev.add(\"summary\", lesson['discipline']) ev.add(\"location\", vText(\", \".join([lesson['building'], lesson.get('auditorium', '')])))", "tts: for lesson in tt['Lessons']: cal.add_component(lesson_to_event(lesson)) return cal if __name__ == 
'__main__': app.run()", "\"text/calendar; charset=utf-8\" return response def page_to_idxes(url): m = re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url) if not", "todate): entrypoint = \"https://www.hse.ru/api/timetable/lessons\" out = [] for idx in idxes.split(\"_\"): out.append(requests.get(entrypoint, params=dict(fromdate=fromdate,", "Event() date = lesson['date'] begin = lesson['beginLesson'] end = lesson['endLesson'] fmt = \"%Y.%m.%d", "@app.route('/<string:idxes>/cal.ics') def ics(idxes): tts = get_current_timetable(idxes) cal = tt_to_ical(tts) response = make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"]", "{url_tt}\".format( url_tt=url_tt )) return out def qr(data): factory = qrcode.image.svg.SvgImage img = qrcode.make(data,", "page_url = request.form.get('url') try: idxes = page_to_idxes(page_url) except MyError as err: return render_template(\"form.html\",", "err: return render_template(\"form.html\", url=page_url, error=str(err)) url = url_for(\"ics\", idxes=\"_\".join(idxes), _external=True) return render_template(\"form.html\", url=page_url,", "url = url_for(\"ics\", idxes=\"_\".join(idxes), _external=True) return render_template(\"form.html\", url=page_url, dest=url, qr=qr(url)) @app.route('/<string:idxes>/cal.ics') def ics(idxes):", "page_to_idxes(url): m = re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url) if not m: raise MyError( \"{url} doesn't", "'POST']) def hello_world(): if request.method == 'GET': return render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"]) page_url = request.form.get('url')", "out: raise MyError(\"idx not found on page {url_tt}\".format( url_tt=url_tt )) return out def", "end = lesson['endLesson'] fmt = \"%Y.%m.%d %H:%M\" begin_dt = datetime.datetime.strptime(date + \" \"", "qr=qr(url)) @app.route('/<string:idxes>/cal.ics') def ics(idxes): tts = get_current_timetable(idxes) cal = tt_to_ical(tts) response = make_response(cal.to_ical().decode('utf-8'))", "get_current_timetable(idxes) cal = tt_to_ical(tts) response = make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"] = (\"attachment; \" \"filename=calendar.ics\") response.headers[\"Content-Type\"]", "return render_template(\"form.html\", url=page_url, dest=url, qr=qr(url)) @app.route('/<string:idxes>/cal.ics') def ics(idxes): tts = get_current_timetable(idxes) cal =", "charset=utf-8\" return response def page_to_idxes(url): m = re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url) if not m:", "for idx in idxes.split(\"_\"): out.append(requests.get(entrypoint, params=dict(fromdate=fromdate, todate=todate, lectureroid=idx, receiverType='1')).json()) return out def dt_to_Ymd(dt):", "HSE professor personal page\".format( url=url )) url_tt = m.group(0) + \"/timetable\" page =", "io = BytesIO() img.save(io) return io.getvalue().decode(\"utf-8\") def get_timetable(idxes, fromdate, todate): entrypoint = \"https://www.hse.ru/api/timetable/lessons\"", "lesson['beginLesson'] end = lesson['endLesson'] fmt = \"%Y.%m.%d %H:%M\" begin_dt = datetime.datetime.strptime(date + \"", "qr(data): factory = qrcode.image.svg.SvgImage img = qrcode.make(data, image_factory=factory) io = BytesIO() img.save(io) return", "url_tt = m.group(0) + \"/timetable\" page = requests.get(url_tt) out = [m.group(1) for m", "= tt_to_ical(tts) response = make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"] = (\"attachment; \" 
\"filename=calendar.ics\") response.headers[\"Content-Type\"] = \"text/calendar;", "todate=todate, lectureroid=idx, receiverType='1')).json()) return out def dt_to_Ymd(dt): return dt.strftime(\"%Y.%m.%d\") def get_current_timetable(idxes, weeks=10): now", "_external=True) return render_template(\"form.html\", url=page_url, dest=url, qr=qr(url)) @app.route('/<string:idxes>/cal.ics') def ics(idxes): tts = get_current_timetable(idxes) cal", "dt.strftime(\"%Y.%m.%d\") def get_current_timetable(idxes, weeks=10): now = datetime.datetime.now() delta = datetime.timedelta(weeks=weeks) fromdate = dt_to_Ymd(now", "response.headers[\"Content-Type\"] = \"text/calendar; charset=utf-8\" return response def page_to_idxes(url): m = re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url)", "= lesson['endLesson'] fmt = \"%Y.%m.%d %H:%M\" begin_dt = datetime.datetime.strptime(date + \" \" +", "return io.getvalue().decode(\"utf-8\") def get_timetable(idxes, fromdate, todate): entrypoint = \"https://www.hse.ru/api/timetable/lessons\" out = [] for", "from io import BytesIO app = Flask(__name__) app.config[\"APPLICATION_ROOT\"] = \"/ttics/\" app.config['SERVER_NAME'] = 'math-info.hse.ru'", "= lesson['date'] begin = lesson['beginLesson'] end = lesson['endLesson'] fmt = \"%Y.%m.%d %H:%M\" begin_dt", "make_response) import requests import re import datetime from icalendar import Calendar, Event, vText", "datetime.datetime.strptime(date + \" \" + begin, fmt) end_dt = datetime.datetime.strptime(date + \" \"", "now = datetime.datetime.now() delta = datetime.timedelta(weeks=weeks) fromdate = dt_to_Ymd(now - delta) todate =", "= make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"] = (\"attachment; \" \"filename=calendar.ics\") response.headers[\"Content-Type\"] = \"text/calendar; charset=utf-8\" return response", "if not m: raise MyError( \"{url} doesn't look like HSE professor personal page\".format(", "= qrcode.image.svg.SvgImage img = qrcode.make(data, image_factory=factory) io = BytesIO() img.save(io) return io.getvalue().decode(\"utf-8\") def", "out.append(requests.get(entrypoint, params=dict(fromdate=fromdate, todate=todate, lectureroid=idx, receiverType='1')).json()) return out def dt_to_Ymd(dt): return dt.strftime(\"%Y.%m.%d\") def get_current_timetable(idxes,", "re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url) if not m: raise MyError( \"{url} doesn't look like HSE", "MyError(\"idx not found on page {url_tt}\".format( url_tt=url_tt )) return out def qr(data): factory", "fmt) ev.add(\"dtstart\", begin_dt) ev.add(\"dtend\", end_dt) ev.add(\"summary\", lesson['discipline']) ev.add(\"location\", vText(\", \".join([lesson['building'], lesson.get('auditorium', '')]))) return", "fromdate = dt_to_Ymd(now - delta) todate = dt_to_Ymd(now + delta) return get_timetable(idxes, fromdate,", "return get_timetable(idxes, fromdate, todate) def lesson_to_event(lesson): ev = Event() date = lesson['date'] begin", "ev.add(\"dtend\", end_dt) ev.add(\"summary\", lesson['discipline']) ev.add(\"location\", vText(\", \".join([lesson['building'], lesson.get('auditorium', '')]))) return ev def tt_to_ical(tts):", "'')]))) return ev def tt_to_ical(tts): cal = Calendar() for tt in tts: for", "url=page_url, error=str(err)) url = url_for(\"ics\", idxes=\"_\".join(idxes), _external=True) return render_template(\"form.html\", url=page_url, dest=url, qr=qr(url)) @app.route('/<string:idxes>/cal.ics')", "not m: raise MyError( \"{url} doesn't look like HSE professor 
personal page\".format( url=url", "lesson['date'] begin = lesson['beginLesson'] end = lesson['endLesson'] fmt = \"%Y.%m.%d %H:%M\" begin_dt =", "in tts: for lesson in tt['Lessons']: cal.add_component(lesson_to_event(lesson)) return cal if __name__ == '__main__':", "import qrcode.image.svg from io import BytesIO app = Flask(__name__) app.config[\"APPLICATION_ROOT\"] = \"/ttics/\" app.config['SERVER_NAME']", "Calendar() for tt in tts: for lesson in tt['Lessons']: cal.add_component(lesson_to_event(lesson)) return cal if", "= get_current_timetable(idxes) cal = tt_to_ical(tts) response = make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"] = (\"attachment; \" \"filename=calendar.ics\")", "flask import (Flask, render_template, abort, send_from_directory, url_for, g, request, jsonify, redirect, make_response) import", "requests.get(url_tt) out = [m.group(1) for m in re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)] if not out: raise", "import (Flask, render_template, abort, send_from_directory, url_for, g, request, jsonify, redirect, make_response) import requests", "return render_template(\"form.html\", url=page_url, error=str(err)) url = url_for(\"ics\", idxes=\"_\".join(idxes), _external=True) return render_template(\"form.html\", url=page_url, dest=url,", "\"/timetable\" page = requests.get(url_tt) out = [m.group(1) for m in re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)] if", "not found on page {url_tt}\".format( url_tt=url_tt )) return out def qr(data): factory =", "out = [] for idx in idxes.split(\"_\"): out.append(requests.get(entrypoint, params=dict(fromdate=fromdate, todate=todate, lectureroid=idx, receiverType='1')).json()) return", "import re import datetime from icalendar import Calendar, Event, vText import qrcode import", "return render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"]) page_url = request.form.get('url') try: idxes = page_to_idxes(page_url) except MyError as", "personal page\".format( url=url )) url_tt = m.group(0) + \"/timetable\" page = requests.get(url_tt) out", "on page {url_tt}\".format( url_tt=url_tt )) return out def qr(data): factory = qrcode.image.svg.SvgImage img", "idxes=\"_\".join(idxes), _external=True) return render_template(\"form.html\", url=page_url, dest=url, qr=qr(url)) @app.route('/<string:idxes>/cal.ics') def ics(idxes): tts = get_current_timetable(idxes)", "url_tt=url_tt )) return out def qr(data): factory = qrcode.image.svg.SvgImage img = qrcode.make(data, image_factory=factory)", "date = lesson['date'] begin = lesson['beginLesson'] end = lesson['endLesson'] fmt = \"%Y.%m.%d %H:%M\"", "m.group(0) + \"/timetable\" page = requests.get(url_tt) out = [m.group(1) for m in re.finditer(r\"idx.push\\('(\\d+)'\\);\",", "\" \" + begin, fmt) end_dt = datetime.datetime.strptime(date + \" \" + end,", "\" \" + end, fmt) ev.add(\"dtstart\", begin_dt) ev.add(\"dtend\", end_dt) ev.add(\"summary\", lesson['discipline']) ev.add(\"location\", vText(\",", "m = re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url) if not m: raise MyError( \"{url} doesn't look", ")) url_tt = m.group(0) + \"/timetable\" page = requests.get(url_tt) out = [m.group(1) for", "= [] for idx in idxes.split(\"_\"): out.append(requests.get(entrypoint, params=dict(fromdate=fromdate, todate=todate, lectureroid=idx, receiverType='1')).json()) return out", "get_timetable(idxes, fromdate, todate) def lesson_to_event(lesson): ev = Event() date = lesson['date'] begin =", "app.debug=True class MyError(Exception): pass 
@app.route('/', methods=['GET', 'POST']) def hello_world(): if request.method == 'GET':", "import requests import re import datetime from icalendar import Calendar, Event, vText import", "render_template(\"form.html\", url=page_url, dest=url, qr=qr(url)) @app.route('/<string:idxes>/cal.ics') def ics(idxes): tts = get_current_timetable(idxes) cal = tt_to_ical(tts)", "'math-info.hse.ru' app.debug=True class MyError(Exception): pass @app.route('/', methods=['GET', 'POST']) def hello_world(): if request.method ==", "pass @app.route('/', methods=['GET', 'POST']) def hello_world(): if request.method == 'GET': return render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"])", "img = qrcode.make(data, image_factory=factory) io = BytesIO() img.save(io) return io.getvalue().decode(\"utf-8\") def get_timetable(idxes, fromdate,", "ev.add(\"summary\", lesson['discipline']) ev.add(\"location\", vText(\", \".join([lesson['building'], lesson.get('auditorium', '')]))) return ev def tt_to_ical(tts): cal =", "= dt_to_Ymd(now + delta) return get_timetable(idxes, fromdate, todate) def lesson_to_event(lesson): ev = Event()", "ics(idxes): tts = get_current_timetable(idxes) cal = tt_to_ical(tts) response = make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"] = (\"attachment;", "(Flask, render_template, abort, send_from_directory, url_for, g, request, jsonify, redirect, make_response) import requests import", "m: raise MyError( \"{url} doesn't look like HSE professor personal page\".format( url=url ))", "params=dict(fromdate=fromdate, todate=todate, lectureroid=idx, receiverType='1')).json()) return out def dt_to_Ymd(dt): return dt.strftime(\"%Y.%m.%d\") def get_current_timetable(idxes, weeks=10):", "dt_to_Ymd(now - delta) todate = dt_to_Ymd(now + delta) return get_timetable(idxes, fromdate, todate) def", "in re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)] if not out: raise MyError(\"idx not found on page {url_tt}\".format(", "fromdate, todate): entrypoint = \"https://www.hse.ru/api/timetable/lessons\" out = [] for idx in idxes.split(\"_\"): out.append(requests.get(entrypoint,", "import BytesIO app = Flask(__name__) app.config[\"APPLICATION_ROOT\"] = \"/ttics/\" app.config['SERVER_NAME'] = 'math-info.hse.ru' app.debug=True class", "end_dt = datetime.datetime.strptime(date + \" \" + end, fmt) ev.add(\"dtstart\", begin_dt) ev.add(\"dtend\", end_dt)", "except MyError as err: return render_template(\"form.html\", url=page_url, error=str(err)) url = url_for(\"ics\", idxes=\"_\".join(idxes), _external=True)", "(\"attachment; \" \"filename=calendar.ics\") response.headers[\"Content-Type\"] = \"text/calendar; charset=utf-8\" return response def page_to_idxes(url): m =", "tt_to_ical(tts): cal = Calendar() for tt in tts: for lesson in tt['Lessons']: cal.add_component(lesson_to_event(lesson))", "= requests.get(url_tt) out = [m.group(1) for m in re.finditer(r\"idx.push\\('(\\d+)'\\);\", page.text)] if not out:", "g, request, jsonify, redirect, make_response) import requests import re import datetime from icalendar", "not out: raise MyError(\"idx not found on page {url_tt}\".format( url_tt=url_tt )) return out", "send_from_directory, url_for, g, request, jsonify, redirect, make_response) import requests import re import datetime", "= \"/ttics/\" app.config['SERVER_NAME'] = 'math-info.hse.ru' app.debug=True class MyError(Exception): pass @app.route('/', methods=['GET', 'POST']) def", "datetime from icalendar import Calendar, Event, vText import qrcode import 
qrcode.image.svg from io", "class MyError(Exception): pass @app.route('/', methods=['GET', 'POST']) def hello_world(): if request.method == 'GET': return", "+ \" \" + end, fmt) ev.add(\"dtstart\", begin_dt) ev.add(\"dtend\", end_dt) ev.add(\"summary\", lesson['discipline']) ev.add(\"location\",", "lesson['discipline']) ev.add(\"location\", vText(\", \".join([lesson['building'], lesson.get('auditorium', '')]))) return ev def tt_to_ical(tts): cal = Calendar()", "icalendar import Calendar, Event, vText import qrcode import qrcode.image.svg from io import BytesIO", "= datetime.datetime.strptime(date + \" \" + begin, fmt) end_dt = datetime.datetime.strptime(date + \"", "cal = tt_to_ical(tts) response = make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"] = (\"attachment; \" \"filename=calendar.ics\") response.headers[\"Content-Type\"] =", "\" + begin, fmt) end_dt = datetime.datetime.strptime(date + \" \" + end, fmt)", "app.config[\"APPLICATION_ROOT\"] = \"/ttics/\" app.config['SERVER_NAME'] = 'math-info.hse.ru' app.debug=True class MyError(Exception): pass @app.route('/', methods=['GET', 'POST'])", "\"{url} doesn't look like HSE professor personal page\".format( url=url )) url_tt = m.group(0)", "abort, send_from_directory, url_for, g, request, jsonify, redirect, make_response) import requests import re import", "dest=url, qr=qr(url)) @app.route('/<string:idxes>/cal.ics') def ics(idxes): tts = get_current_timetable(idxes) cal = tt_to_ical(tts) response =", "= re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url) if not m: raise MyError( \"{url} doesn't look like", "factory = qrcode.image.svg.SvgImage img = qrcode.make(data, image_factory=factory) io = BytesIO() img.save(io) return io.getvalue().decode(\"utf-8\")", "tts = get_current_timetable(idxes) cal = tt_to_ical(tts) response = make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"] = (\"attachment; \"", "return dt.strftime(\"%Y.%m.%d\") def get_current_timetable(idxes, weeks=10): now = datetime.datetime.now() delta = datetime.timedelta(weeks=weeks) fromdate =", "datetime.datetime.now() delta = datetime.timedelta(weeks=weeks) fromdate = dt_to_Ymd(now - delta) todate = dt_to_Ymd(now +", "todate) def lesson_to_event(lesson): ev = Event() date = lesson['date'] begin = lesson['beginLesson'] end", "hello_world(): if request.method == 'GET': return render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"]) page_url = request.form.get('url') try: idxes", "def lesson_to_event(lesson): ev = Event() date = lesson['date'] begin = lesson['beginLesson'] end =", "begin_dt = datetime.datetime.strptime(date + \" \" + begin, fmt) end_dt = datetime.datetime.strptime(date +", "begin, fmt) end_dt = datetime.datetime.strptime(date + \" \" + end, fmt) ev.add(\"dtstart\", begin_dt)", "def tt_to_ical(tts): cal = Calendar() for tt in tts: for lesson in tt['Lessons']:", "begin_dt) ev.add(\"dtend\", end_dt) ev.add(\"summary\", lesson['discipline']) ev.add(\"location\", vText(\", \".join([lesson['building'], lesson.get('auditorium', '')]))) return ev def", "url=page_url, dest=url, qr=qr(url)) @app.route('/<string:idxes>/cal.ics') def ics(idxes): tts = get_current_timetable(idxes) cal = tt_to_ical(tts) response", "io import BytesIO app = Flask(__name__) app.config[\"APPLICATION_ROOT\"] = \"/ttics/\" app.config['SERVER_NAME'] = 'math-info.hse.ru' app.debug=True", "= datetime.datetime.strptime(date + \" \" + end, fmt) ev.add(\"dtstart\", begin_dt) ev.add(\"dtend\", 
end_dt) ev.add(\"summary\",", "Event, vText import qrcode import qrcode.image.svg from io import BytesIO app = Flask(__name__)", "= (\"attachment; \" \"filename=calendar.ics\") response.headers[\"Content-Type\"] = \"text/calendar; charset=utf-8\" return response def page_to_idxes(url): m", "doesn't look like HSE professor personal page\".format( url=url )) url_tt = m.group(0) +", "get_current_timetable(idxes, weeks=10): now = datetime.datetime.now() delta = datetime.timedelta(weeks=weeks) fromdate = dt_to_Ymd(now - delta)", "= Flask(__name__) app.config[\"APPLICATION_ROOT\"] = \"/ttics/\" app.config['SERVER_NAME'] = 'math-info.hse.ru' app.debug=True class MyError(Exception): pass @app.route('/',", "methods=['GET', 'POST']) def hello_world(): if request.method == 'GET': return render_template(\"form.html\", rootdir=app.config[\"APPLICATION_ROOT\"]) page_url =", "make_response(cal.to_ical().decode('utf-8')) response.headers[\"Content-Disposition\"] = (\"attachment; \" \"filename=calendar.ics\") response.headers[\"Content-Type\"] = \"text/calendar; charset=utf-8\" return response def", "url_for(\"ics\", idxes=\"_\".join(idxes), _external=True) return render_template(\"form.html\", url=page_url, dest=url, qr=qr(url)) @app.route('/<string:idxes>/cal.ics') def ics(idxes): tts =", "import Calendar, Event, vText import qrcode import qrcode.image.svg from io import BytesIO app", "lectureroid=idx, receiverType='1')).json()) return out def dt_to_Ymd(dt): return dt.strftime(\"%Y.%m.%d\") def get_current_timetable(idxes, weeks=10): now =", "qrcode.image.svg from io import BytesIO app = Flask(__name__) app.config[\"APPLICATION_ROOT\"] = \"/ttics/\" app.config['SERVER_NAME'] =", "datetime.timedelta(weeks=weeks) fromdate = dt_to_Ymd(now - delta) todate = dt_to_Ymd(now + delta) return get_timetable(idxes,", "def qr(data): factory = qrcode.image.svg.SvgImage img = qrcode.make(data, image_factory=factory) io = BytesIO() img.save(io)", "render_template, abort, send_from_directory, url_for, g, request, jsonify, redirect, make_response) import requests import re", "def page_to_idxes(url): m = re.match( r\"http(s?)://(www\\.)?hse.ru/(org/persons/\\d+|staff/\\w+)\", url) if not m: raise MyError( \"{url}", "in idxes.split(\"_\"): out.append(requests.get(entrypoint, params=dict(fromdate=fromdate, todate=todate, lectureroid=idx, receiverType='1')).json()) return out def dt_to_Ymd(dt): return dt.strftime(\"%Y.%m.%d\")", "lesson_to_event(lesson): ev = Event() date = lesson['date'] begin = lesson['beginLesson'] end = lesson['endLesson']", "= BytesIO() img.save(io) return io.getvalue().decode(\"utf-8\") def get_timetable(idxes, fromdate, todate): entrypoint = \"https://www.hse.ru/api/timetable/lessons\" out", "url) if not m: raise MyError( \"{url} doesn't look like HSE professor personal", "like HSE professor personal page\".format( url=url )) url_tt = m.group(0) + \"/timetable\" page", "from icalendar import Calendar, Event, vText import qrcode import qrcode.image.svg from io import", "[] for idx in idxes.split(\"_\"): out.append(requests.get(entrypoint, params=dict(fromdate=fromdate, todate=todate, lectureroid=idx, receiverType='1')).json()) return out def", "delta) return get_timetable(idxes, fromdate, todate) def lesson_to_event(lesson): ev = Event() date = lesson['date']", "rootdir=app.config[\"APPLICATION_ROOT\"]) page_url = request.form.get('url') try: idxes = page_to_idxes(page_url) except MyError as err: return", ")) return out def qr(data): factory = qrcode.image.svg.SvgImage img = qrcode.make(data, 
image_factory=factory) io", "page {url_tt}\".format( url_tt=url_tt )) return out def qr(data): factory = qrcode.image.svg.SvgImage img =", "\" \"filename=calendar.ics\") response.headers[\"Content-Type\"] = \"text/calendar; charset=utf-8\" return response def page_to_idxes(url): m = re.match(" ]
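The conversion path (lesson_to_event, tt_to_ical) can be checked offline without touching the HSE API. The lesson dict below is made up; it only mimics the fields the code above reads (date, beginLesson, endLesson, discipline, building, auditorium).

from ttics import tt_to_ical

# One timetable with a single invented lesson, shaped like the API payload.
sample_tts = [{
    "Lessons": [{
        "date": "2020.09.01",
        "beginLesson": "10:30",
        "endLesson": "11:50",
        "discipline": "Calculus",
        "building": "Usacheva 6",
        "auditorium": "306",
    }]
}]

# Prints a VCALENDAR containing one VEVENT with DTSTART/DTEND/SUMMARY/LOCATION.
print(tt_to_ical(sample_tts).to_ical().decode("utf-8"))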
[ "= self.get_response(request) return response def process_view(self, request, view_func, view_args, view_kwargs): path = request.path_info.lstrip('/')", "flag1 and flag2: return redirect('main:dashboard') elif flag1 or flag2: return None return redirect(settings.LOGIN_URL)", "None flag1 = True try: email = request.session['useremail'] phone_number = request.session['phone_number'] except KeyError:", "-1: return None if path == 'sign-out': return None flag1 = True try:", "settings from django.shortcuts import redirect from django.contrib.auth import logout class LoginRequiredMiddleware: def __init__(self,get_response):", "django.shortcuts import redirect from django.contrib.auth import logout class LoginRequiredMiddleware: def __init__(self,get_response): self.get_response =", "self.get_response(request) return response def process_view(self, request, view_func, view_args, view_kwargs): path = request.path_info.lstrip('/') print(path)", "flag2 = False if path in settings.LOGIN_EXEMPT_URLS: flag2 = True if flag1 and", "UserTypeRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def __call__(self,request): response = self.get_response(request) return response", "= request.path_info.lstrip('/') print(path) if path.find(\"admin/\") != -1: return None if path == 'sign-out':", "!= -1: return None if path == 'sign-out': return None flag1 = True", "flag1 = True try: email = request.session['useremail'] phone_number = request.session['phone_number'] except KeyError: flag1", "request.session['useremail'] phone_number = request.session['phone_number'] except KeyError: flag1 = False flag2 = False if", "path in settings.LOGIN_EXEMPT_URLS: flag2 = True if flag1 and flag2: return redirect('main:dashboard') elif", "or flag2: return None return redirect(settings.LOGIN_URL) class UserTypeRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response", "True if flag1 and flag2: return redirect('main:dashboard') elif flag1 or flag2: return None", "django.contrib.auth import logout class LoginRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def __call__(self,request): response", "return None flag1 = True try: email = request.session['useremail'] phone_number = request.session['phone_number'] except", "flag2: return redirect('main:dashboard') elif flag1 or flag2: return None return redirect(settings.LOGIN_URL) class UserTypeRequiredMiddleware:", "def process_view(self, request, view_func, view_args, view_kwargs): path = request.path_info.lstrip('/') print(path) if path.find(\"admin/\") !=", "= True try: email = request.session['useremail'] phone_number = request.session['phone_number'] except KeyError: flag1 =", "import re from django.conf import settings from django.shortcuts import redirect from django.contrib.auth import", "import settings from django.shortcuts import redirect from django.contrib.auth import logout class LoginRequiredMiddleware: def", "'sign-out': return None flag1 = True try: email = request.session['useremail'] phone_number = request.session['phone_number']", "= get_response def __call__(self,request): response = self.get_response(request) return response def process_view(self, request, view_func,", "return response def process_view(self, request, view_func, view_args, view_kwargs): path = request.path_info.lstrip('/') print(path) if", "if flag1 and flag2: return redirect('main:dashboard') elif flag1 or flag2: return None return", "class LoginRequiredMiddleware: def __init__(self,get_response): 
self.get_response = get_response def __call__(self,request): response = self.get_response(request) return", "False flag2 = False if path in settings.LOGIN_EXEMPT_URLS: flag2 = True if flag1", "return redirect('main:dashboard') elif flag1 or flag2: return None return redirect(settings.LOGIN_URL) class UserTypeRequiredMiddleware: def", "in settings.LOGIN_EXEMPT_URLS: flag2 = True if flag1 and flag2: return redirect('main:dashboard') elif flag1", "__call__(self,request): response = self.get_response(request) return response def process_view(self, request, view_func, view_args, view_kwargs): path", "flag2 = True if flag1 and flag2: return redirect('main:dashboard') elif flag1 or flag2:", "= False if path in settings.LOGIN_EXEMPT_URLS: flag2 = True if flag1 and flag2:", "logout class LoginRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def __call__(self,request): response = self.get_response(request)", "== 'sign-out': return None flag1 = True try: email = request.session['useremail'] phone_number =", "import redirect from django.contrib.auth import logout class LoginRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response", "flag2: return None return redirect(settings.LOGIN_URL) class UserTypeRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def", "= False flag2 = False if path in settings.LOGIN_EXEMPT_URLS: flag2 = True if", "response def process_view(self, request, view_func, view_args, view_kwargs): path = request.path_info.lstrip('/') print(path) if path.find(\"admin/\")", "if path == 'sign-out': return None flag1 = True try: email = request.session['useremail']", "re from django.conf import settings from django.shortcuts import redirect from django.contrib.auth import logout", "KeyError: flag1 = False flag2 = False if path in settings.LOGIN_EXEMPT_URLS: flag2 =", "True try: email = request.session['useremail'] phone_number = request.session['phone_number'] except KeyError: flag1 = False", "print(path) if path.find(\"admin/\") != -1: return None if path == 'sign-out': return None", "view_kwargs): path = request.path_info.lstrip('/') print(path) if path.find(\"admin/\") != -1: return None if path", "False if path in settings.LOGIN_EXEMPT_URLS: flag2 = True if flag1 and flag2: return", "view_func, view_args, view_kwargs): path = request.path_info.lstrip('/') print(path) if path.find(\"admin/\") != -1: return None", "phone_number = request.session['phone_number'] except KeyError: flag1 = False flag2 = False if path", "settings.LOGIN_EXEMPT_URLS: flag2 = True if flag1 and flag2: return redirect('main:dashboard') elif flag1 or", "def __call__(self,request): response = self.get_response(request) return response def process_view(self, request, view_func, view_args, view_kwargs):", "= True if flag1 and flag2: return redirect('main:dashboard') elif flag1 or flag2: return", "request, view_func, view_args, view_kwargs): path = request.path_info.lstrip('/') print(path) if path.find(\"admin/\") != -1: return", "return None return redirect(settings.LOGIN_URL) class UserTypeRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def __call__(self,request):", "from django.shortcuts import redirect from django.contrib.auth import logout class LoginRequiredMiddleware: def __init__(self,get_response): self.get_response", "redirect('main:dashboard') elif flag1 or flag2: return None return redirect(settings.LOGIN_URL) class UserTypeRequiredMiddleware: def 
__init__(self,get_response):", "import logout class LoginRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def __call__(self,request): response =", "None if path == 'sign-out': return None flag1 = True try: email =", "redirect(settings.LOGIN_URL) class UserTypeRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def __call__(self,request): response = self.get_response(request)", "and flag2: return redirect('main:dashboard') elif flag1 or flag2: return None return redirect(settings.LOGIN_URL) class", "if path.find(\"admin/\") != -1: return None if path == 'sign-out': return None flag1", "get_response def __call__(self,request): response = self.get_response(request) return response def process_view(self, request, view_func, view_args,", "email = request.session['useremail'] phone_number = request.session['phone_number'] except KeyError: flag1 = False flag2 =", "LoginRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def __call__(self,request): response = self.get_response(request) return response", "<reponame>topdeveloper424/epatient-django import re from django.conf import settings from django.shortcuts import redirect from django.contrib.auth", "process_view(self, request, view_func, view_args, view_kwargs): path = request.path_info.lstrip('/') print(path) if path.find(\"admin/\") != -1:", "response = self.get_response(request) return response def process_view(self, request, view_func, view_args, view_kwargs): path =", "path.find(\"admin/\") != -1: return None if path == 'sign-out': return None flag1 =", "from django.contrib.auth import logout class LoginRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def __call__(self,request):", "= request.session['phone_number'] except KeyError: flag1 = False flag2 = False if path in", "path = request.path_info.lstrip('/') print(path) if path.find(\"admin/\") != -1: return None if path ==", "return redirect(settings.LOGIN_URL) class UserTypeRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def __call__(self,request): response =", "try: email = request.session['useremail'] phone_number = request.session['phone_number'] except KeyError: flag1 = False flag2", "= request.session['useremail'] phone_number = request.session['phone_number'] except KeyError: flag1 = False flag2 = False", "elif flag1 or flag2: return None return redirect(settings.LOGIN_URL) class UserTypeRequiredMiddleware: def __init__(self,get_response): self.get_response", "view_args, view_kwargs): path = request.path_info.lstrip('/') print(path) if path.find(\"admin/\") != -1: return None if", "def __init__(self,get_response): self.get_response = get_response def __call__(self,request): response = self.get_response(request) return response def", "django.conf import settings from django.shortcuts import redirect from django.contrib.auth import logout class LoginRequiredMiddleware:", "from django.conf import settings from django.shortcuts import redirect from django.contrib.auth import logout class", "path == 'sign-out': return None flag1 = True try: email = request.session['useremail'] phone_number", "None return redirect(settings.LOGIN_URL) class UserTypeRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def __call__(self,request): response", "if path in settings.LOGIN_EXEMPT_URLS: flag2 = True if flag1 and flag2: return redirect('main:dashboard')", "self.get_response = get_response 
def __call__(self,request): response = self.get_response(request) return response def process_view(self, request,", "redirect from django.contrib.auth import logout class LoginRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def", "__init__(self,get_response): self.get_response = get_response def __call__(self,request): response = self.get_response(request) return response def process_view(self,", "flag1 = False flag2 = False if path in settings.LOGIN_EXEMPT_URLS: flag2 = True", "flag1 or flag2: return None return redirect(settings.LOGIN_URL) class UserTypeRequiredMiddleware: def __init__(self,get_response): self.get_response =", "request.path_info.lstrip('/') print(path) if path.find(\"admin/\") != -1: return None if path == 'sign-out': return", "class UserTypeRequiredMiddleware: def __init__(self,get_response): self.get_response = get_response def __call__(self,request): response = self.get_response(request) return", "except KeyError: flag1 = False flag2 = False if path in settings.LOGIN_EXEMPT_URLS: flag2", "return None if path == 'sign-out': return None flag1 = True try: email", "request.session['phone_number'] except KeyError: flag1 = False flag2 = False if path in settings.LOGIN_EXEMPT_URLS:" ]
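For process_view to fire, both classes have to be registered in the project settings, which must also define LOGIN_URL and LOGIN_EXEMPT_URLS. The sketch below shows the expected shape; the dotted module path and the exempt paths are assumptions, not taken from the source.

# settings.py (sketch — module path and exempt paths are illustrative)
LOGIN_URL = '/sign-in'

# Compared against request.path_info.lstrip('/'), so entries carry no leading slash.
LOGIN_EXEMPT_URLS = (
    'sign-in',
    'sign-up',
)

MIDDLEWARE = [
    # ... Django's stock middleware ...
    'main.middleware.LoginRequiredMiddleware',
    'main.middleware.UserTypeRequiredMiddleware',
]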
[ "file_entry(): def __init__(self,route_response_label,route_destiny_label): self.route_response_label=route_response_label self.route_destiny_label=route_destiny_label self.dwc_terms=self.dict_loader() #Diccionario def dict_loader(self): #Apertura del dict dict_path=r\"documents\\dwc_terms\\dwc_fieldName_dict.pkl\"", "#!/usr/bin/env python # -*- coding: utf-8 -*- import os import pickle import pandas", "links user info y todos los dwc_terms #Herramienta para normalizar horas #Georreferenciación #Guardar", "de datos\\n Este debe ser un valor unico para cada especimen\" title=\"Seleccion\" indexo=eg.choicebox(msg,title,columns_df)", "se deben leer para refresh data base igual, esto queda para refresh data", "as qtc class file_entry(): def __init__(self,route_response_label,route_destiny_label): self.route_response_label=route_response_label self.route_destiny_label=route_destiny_label self.dwc_terms=self.dict_loader() #Diccionario def dict_loader(self): #Apertura", "for verbatimFieldName in dataframe_columns: for stdFieldName in dwc_terms_keys: if verbatimFieldName in self.dwc_terms.get(stdFieldName): darwinizer_list.append((verbatimFieldName,stdFieldName))", "# mejor no ya que al guarar listas de columnas solo debo hacer", "pd import easygui as eg from collections import defaultdict from PyQt5 import QtCore", "darwinizer, # mejor no ya que al guarar listas de columnas solo debo", "borran columnas sin datos try: file_path=self.route_response_label if file_path.endswith('.xlsx') or file_path.endswith('.xls'): data=pd.read_excel(file_path,header=0) elif file_path.endswith('.csv'):", "leer para refresh data base igual, esto queda para refresh data base #Comparar", "as dict_file: return pickle.load(dict_file) def file_opener(self): #Apertura del archivo, con el index correcto,", "= pickle.load(f) not_recommended_labels=[] for labels in df_columns: if labels not in self.dwc_terms.keys(): not_recommended_labels.append(labels)", "nuevamente #Esta función debe ir en refresh data base o en darwinizer, #", ", except: print(\"No hemos podido encontrar la ruta del archivo Excel\") try: data.dropna(axis=1,", "quedando en script refresh data base #leer columnas de lista de dwc_file, visitors_file,", "con el index correcto, se borran columnas sin datos try: file_path=self.route_response_label if file_path.endswith('.xlsx')", "i <= len(df_columns)-1: if i not in selected_indexes: df_selected_visitors_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\"), exist_ok=True) f", "#Esta función debe ir en refresh data base o en darwinizer, # mejor", "exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\",\"wb\") pickle.dump(column_dict,f) f.close() os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",\"wb\") pickle.dump(data.columns.tolist(),f) f.close()", "df_columns def visitors_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_visitors_labels=[] i=0 while i <=", "return pickle.load(dict_file) def file_opener(self): #Apertura del archivo, con el index correcto, se borran", "f.close() listWidget.clear() def visitors_label_filler(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns = pickle.load(f) 
listWidget.addItems(df_columns)", "el index correcto, se borran columnas sin datos try: file_path=self.route_response_label if file_path.endswith('.xlsx') or", "pass #Visitors va a seguir quedando en script refresh data base #leer columnas", "-*- import os import pickle import pandas as pd import easygui as eg", "y otras para visitor # así ya esta predefinido cuando se abra nuevamente", "#generar una lista que contenga tuplas verbatimFieldName,stdFieldName #iterador para encontrar tuplas verbatimFieldName,stdFieldName for", "#quiero guardar una lista con columnas de dwc del archivo y otras para", "predefinido cuando se abra nuevamente #Esta función debe ir en refresh data base", "para visitor # así ya esta predefinido cuando se abra nuevamente #Esta función", "df_selected_dwc_labels=[] i=0 while i <= len(df_columns)-1: if i not in selected_indexes: df_selected_dwc_labels.append(df_columns[i]) i=i+1", "pickle.load(dict_file) def file_opener(self): #Apertura del archivo, con el index correcto, se borran columnas", "elif file_path.endswith('.csv'): data=pd.read_csv(file_path,header=0,sep=';') #ver como variar de ; o , except: print(\"No hemos", "data base #Comparar columnas dwcorear #quiero guardar una lista con columnas de dwc", "debo hacer esto una vez #Borar dynamic links user info y todos los", "guarar listas de columnas solo debo hacer esto una vez #Borar dynamic links", "import defaultdict from PyQt5 import QtCore as qtc class file_entry(): def __init__(self,route_response_label,route_destiny_label): self.route_response_label=route_response_label", "correcto, se borran columnas sin datos try: file_path=self.route_response_label if file_path.endswith('.xlsx') or file_path.endswith('.xls'): data=pd.read_excel(file_path,header=0)", "especimen\" title=\"Seleccion\" indexo=eg.choicebox(msg,title,columns_df) data=data.set_index(indexo, drop = True) return data def dataframe_label_transformer(self,data,listWidget,darwinizer_list): column_dict=defaultdict() selected_indexes=[x.row()", "base #leer columnas de lista de dwc_file, visitors_file, estas se deben leer para", "de ; o , except: print(\"No hemos podido encontrar la ruta del archivo", "except: print(\"No hemos podido encontrar la ruta del archivo Excel\") try: data.dropna(axis=1, how='all',inplace=True)", "data def dataframe_label_transformer(self,data,listWidget,darwinizer_list): column_dict=defaultdict() selected_indexes=[x.row() for x in listWidget.selectedIndexes()] if not selected_indexes: column_dict=dict(darwinizer_list)", "f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",\"wb\") pickle.dump(data.columns.tolist(),f) f.close() listWidget.clear() #return data def dwc_label_checker(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb')", "selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_dwc_labels=[] i=0 while i <= len(df_columns)-1: if i", "= open(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\",\"wb\") pickle.dump(column_dict,f) f.close() os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",\"wb\") pickle.dump(data.columns.tolist(),f) f.close() listWidget.clear() #return", "#Herramienta para normalizar horas #Georreferenciación #Guardar df como csv y ese pasarlo para", "len(df_columns)-1: if i not in selected_indexes: df_selected_dwc_labels.append(df_columns[i]) i=i+1 
os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\",\"wb\")", "self.dwc_terms.keys(): not_recommended_labels.append(labels) listWidget.addItems(df_columns) for i in not_recommended_labels: matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly) for item", "visitors_file, estas se deben leer para refresh data base igual, esto queda para", "con columnas de dwc del archivo y otras para visitor # así ya", "easygui as eg from collections import defaultdict from PyQt5 import QtCore as qtc", "para encontrar tuplas verbatimFieldName,stdFieldName for verbatimFieldName in dataframe_columns: for stdFieldName in dwc_terms_keys: if", "msg=\"Seleccione una columna para ser el indice de la base de datos\\n Este", "dwc del archivo y otras para visitor # así ya esta predefinido cuando", "abra nuevamente #Esta función debe ir en refresh data base o en darwinizer,", "así ya esta predefinido cuando se abra nuevamente #Esta función debe ir en", "otras para visitor # así ya esta predefinido cuando se abra nuevamente #Esta", "estas se deben leer para refresh data base igual, esto queda para refresh", "o en darwinizer, # mejor no ya que al guarar listas de columnas", "if labels not in self.dwc_terms.keys(): not_recommended_labels.append(labels) listWidget.addItems(df_columns) for i in not_recommended_labels: matching_items =", "unico para cada especimen\" title=\"Seleccion\" indexo=eg.choicebox(msg,title,columns_df) data=data.set_index(indexo, drop = True) return data def", "columnas de dwc del archivo y otras para visitor # así ya esta", "debe ser un valor unico para cada especimen\" title=\"Seleccion\" indexo=eg.choicebox(msg,title,columns_df) data=data.set_index(indexo, drop =", "en script refresh data base #leer columnas de lista de dwc_file, visitors_file, estas", "visitors_label_filler(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns = pickle.load(f) listWidget.addItems(df_columns) return df_columns def", "# así ya esta predefinido cuando se abra nuevamente #Esta función debe ir", "hacer esto una vez #Borar dynamic links user info y todos los dwc_terms", "f.close() listWidget.clear() def sensitive_data(self): pass #Visitors va a seguir quedando en script refresh", "in not_recommended_labels: matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly) for item in matching_items: item.setSelected(True) return df_columns", "file_path=self.route_response_label if file_path.endswith('.xlsx') or file_path.endswith('.xls'): data=pd.read_excel(file_path,header=0) elif file_path.endswith('.csv'): data=pd.read_csv(file_path,header=0,sep=';') #ver como variar de", "data def darwinizer(self): #Encuentra match entre el df y el diccionario dataframe=self.file_opener() #proveniente", "labels in df_columns: if labels not in self.dwc_terms.keys(): not_recommended_labels.append(labels) listWidget.addItems(df_columns) for i in", "verbatimFieldName in self.dwc_terms.get(stdFieldName): darwinizer_list.append((verbatimFieldName,stdFieldName)) #tupla del match return dataframe,darwinizer_list def set_df_index(self,data): columns_df=data.columns.tolist() msg=\"Seleccione", "esto queda para refresh data base #Comparar columnas dwcorear #quiero guardar una lista", "cada especimen\" title=\"Seleccion\" indexo=eg.choicebox(msg,title,columns_df) data=data.set_index(indexo, drop = True) return data def 
dataframe_label_transformer(self,data,listWidget,darwinizer_list): column_dict=defaultdict()", "not in selected_indexes: df_selected_dwc_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\",\"wb\") pickle.dump(df_selected_dwc_labels,f) f.close() listWidget.clear()", "a seguir quedando en script refresh data base #leer columnas de lista de", "with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns = pickle.load(f) listWidget.addItems(df_columns) return df_columns def visitors_label_transformer(self,listWidget,df_columns):", "open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\",\"wb\") pickle.dump(df_selected_dwc_labels,f) f.close() listWidget.clear() def visitors_label_filler(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns =", "#Apertura del dict dict_path=r\"documents\\dwc_terms\\dwc_fieldName_dict.pkl\" with open(dict_path , 'rb') as dict_file: return pickle.load(dict_file) def", "archivo y otras para visitor # así ya esta predefinido cuando se abra", "i not in selected_indexes: df_selected_visitors_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\",\"wb\") pickle.dump(df_selected_visitors_labels,f) f.close()", "selected_indexes: df_selected_visitors_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\",\"wb\") pickle.dump(df_selected_visitors_labels,f) f.close() listWidget.clear() def sensitive_data(self):", "i not in selected_indexes: df_selected_dwc_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\",\"wb\") pickle.dump(df_selected_dwc_labels,f) f.close()", "dataframe=self.file_opener() #proveniente de la funcion file opener dwc_terms_keys=self.dwc_terms.keys() dataframe_columns=dataframe.columns.tolist() darwinizer_list=[] #generar una lista", "import os import pickle import pandas as pd import easygui as eg from", "if i not in selected_indexes: column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method not proud of it", "dwc_terms #Herramienta para normalizar horas #Georreferenciación #Guardar df como csv y ese pasarlo", "return df_columns def dwc_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_dwc_labels=[] i=0 while i", "listWidget.selectedIndexes()] if not selected_indexes: column_dict=dict(darwinizer_list) else: i=0 while i<=len(darwinizer_list)-1: if i not in", "columnas dwcorear #quiero guardar una lista con columnas de dwc del archivo y", "index correcto, se borran columnas sin datos try: file_path=self.route_response_label if file_path.endswith('.xlsx') or file_path.endswith('.xls'):", "columna para ser el indice de la base de datos\\n Este debe ser", "i not in selected_indexes: 
column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method not proud of it else:", "def __init__(self,route_response_label,route_destiny_label): self.route_response_label=route_response_label self.route_destiny_label=route_destiny_label self.dwc_terms=self.dict_loader() #Diccionario def dict_loader(self): #Apertura del dict dict_path=r\"documents\\dwc_terms\\dwc_fieldName_dict.pkl\" with", "base de datos\\n Este debe ser un valor unico para cada especimen\" title=\"Seleccion\"", "dict_loader(self): #Apertura del dict dict_path=r\"documents\\dwc_terms\\dwc_fieldName_dict.pkl\" with open(dict_path , 'rb') as dict_file: return pickle.load(dict_file)", "os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",\"wb\") pickle.dump(data.columns.tolist(),f) f.close() listWidget.clear() #return data def dwc_label_checker(self,listWidget): with", "va a seguir quedando en script refresh data base #leer columnas de lista", "in selected_indexes: df_selected_visitors_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\",\"wb\") pickle.dump(df_selected_visitors_labels,f) f.close() listWidget.clear() def", "de dwc del archivo y otras para visitor # así ya esta predefinido", "not in selected_indexes: column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method not proud of it else: pass", "todos los dwc_terms #Herramienta para normalizar horas #Georreferenciación #Guardar df como csv y", "match return dataframe,darwinizer_list def set_df_index(self,data): columns_df=data.columns.tolist() msg=\"Seleccione una columna para ser el indice", "x in listWidget.selectedIndexes()] if not selected_indexes: column_dict=dict(darwinizer_list) else: i=0 while i<=len(darwinizer_list)-1: if i", "listWidget.findItems(i, qtc.Qt.MatchExactly) for item in matching_items: item.setSelected(True) return df_columns def dwc_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for", "return data def dataframe_label_transformer(self,data,listWidget,darwinizer_list): column_dict=defaultdict() selected_indexes=[x.row() for x in listWidget.selectedIndexes()] if not selected_indexes:", "stdFieldName in dwc_terms_keys: if verbatimFieldName in self.dwc_terms.get(stdFieldName): darwinizer_list.append((verbatimFieldName,stdFieldName)) #tupla del match return dataframe,darwinizer_list", "data=data.set_index(indexo, drop = True) return data def dataframe_label_transformer(self,data,listWidget,darwinizer_list): column_dict=defaultdict() selected_indexes=[x.row() for x in", "self.dwc_terms.get(stdFieldName): darwinizer_list.append((verbatimFieldName,stdFieldName)) #tupla del match return dataframe,darwinizer_list def set_df_index(self,data): columns_df=data.columns.tolist() msg=\"Seleccione una columna", "item in matching_items: item.setSelected(True) return df_columns def dwc_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()]", "file_path.endswith('.csv'): data=pd.read_csv(file_path,header=0,sep=';') #ver como variar de ; o , except: print(\"No hemos podido", "in listWidget.selectedIndexes()] if not selected_indexes: column_dict=dict(darwinizer_list) else: i=0 while i<=len(darwinizer_list)-1: if i not", "'rb') as dict_file: 
return pickle.load(dict_file) def file_opener(self): #Apertura del archivo, con el index", "f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\",\"wb\") pickle.dump(df_selected_visitors_labels,f) f.close() listWidget.clear() def sensitive_data(self): pass #Visitors va a seguir", "de la funcion file opener dwc_terms_keys=self.dwc_terms.keys() dataframe_columns=dataframe.columns.tolist() darwinizer_list=[] #generar una lista que contenga", "i in not_recommended_labels: matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly) for item in matching_items: item.setSelected(True) return", "#Borar dynamic links user info y todos los dwc_terms #Herramienta para normalizar horas", "as f: df_columns = pickle.load(f) not_recommended_labels=[] for labels in df_columns: if labels not", "os import pickle import pandas as pd import easygui as eg from collections", "darwinizer_list.append((verbatimFieldName,stdFieldName)) #tupla del match return dataframe,darwinizer_list def set_df_index(self,data): columns_df=data.columns.tolist() msg=\"Seleccione una columna para", "with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns = pickle.load(f) not_recommended_labels=[] for labels in df_columns:", "una columna para ser el indice de la base de datos\\n Este debe", "base igual, esto queda para refresh data base #Comparar columnas dwcorear #quiero guardar", "ser el indice de la base de datos\\n Este debe ser un valor", "se abra nuevamente #Esta función debe ir en refresh data base o en", "dict_path=r\"documents\\dwc_terms\\dwc_fieldName_dict.pkl\" with open(dict_path , 'rb') as dict_file: return pickle.load(dict_file) def file_opener(self): #Apertura del", "= open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",\"wb\") pickle.dump(data.columns.tolist(),f) f.close() listWidget.clear() #return data def dwc_label_checker(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as", "selected_indexes: df_selected_dwc_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\",\"wb\") pickle.dump(df_selected_dwc_labels,f) f.close() listWidget.clear() def visitors_label_filler(self,listWidget):", "para normalizar horas #Georreferenciación #Guardar df como csv y ese pasarlo para refreshdatabase", "dataframe,darwinizer_list def set_df_index(self,data): columns_df=data.columns.tolist() msg=\"Seleccione una columna para ser el indice de la", "y todos los dwc_terms #Herramienta para normalizar horas #Georreferenciación #Guardar df como csv", "i=0 while i <= len(df_columns)-1: if i not in selected_indexes: df_selected_dwc_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\"),", "listWidget.addItems(df_columns) return df_columns def visitors_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_visitors_labels=[] i=0 while", "print(\"No hemos podido encontrar la ruta del archivo Excel\") try: data.dropna(axis=1, how='all',inplace=True) except:", "listas de columnas solo debo hacer esto una vez #Borar dynamic links user", "pickle.dump(column_dict,f) f.close() os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"), exist_ok=True) 
f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",\"wb\") pickle.dump(data.columns.tolist(),f) f.close() listWidget.clear() #return data def", ", 'rb') as dict_file: return pickle.load(dict_file) def file_opener(self): #Apertura del archivo, con el", "for x in listWidget.selectedIndexes()] if not selected_indexes: column_dict=dict(darwinizer_list) else: i=0 while i<=len(darwinizer_list)-1: if", "qtc class file_entry(): def __init__(self,route_response_label,route_destiny_label): self.route_response_label=route_response_label self.route_destiny_label=route_destiny_label self.dwc_terms=self.dict_loader() #Diccionario def dict_loader(self): #Apertura del", "else: pass i=i+1 data=data.rename(columns=column_dict) os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\",\"wb\") pickle.dump(column_dict,f) f.close() os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"), exist_ok=True)", "del archivo y otras para visitor # así ya esta predefinido cuando se", "drop = True) return data def dataframe_label_transformer(self,data,listWidget,darwinizer_list): column_dict=defaultdict() selected_indexes=[x.row() for x in listWidget.selectedIndexes()]", "listWidget.clear() def visitors_label_filler(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns = pickle.load(f) listWidget.addItems(df_columns) return", "f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\",\"wb\") pickle.dump(column_dict,f) f.close() os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",\"wb\") pickle.dump(data.columns.tolist(),f) f.close() listWidget.clear()", "listWidget.addItems(df_columns) for i in not_recommended_labels: matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly) for item in matching_items:", "<= len(df_columns)-1: if i not in selected_indexes: df_selected_dwc_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\"), exist_ok=True) f =", "open(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\",\"wb\") pickle.dump(column_dict,f) f.close() os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",\"wb\") pickle.dump(data.columns.tolist(),f) f.close() listWidget.clear() #return data", "def visitors_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_visitors_labels=[] i=0 while i <= len(df_columns)-1:", "de dwc_file, visitors_file, estas se deben leer para refresh data base igual, esto", "method not proud of it else: pass i=i+1 data=data.rename(columns=column_dict) os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\"), exist_ok=True) f =", "#Encuentra match entre el df y el diccionario dataframe=self.file_opener() #proveniente de la funcion", "set_df_index(self,data): columns_df=data.columns.tolist() msg=\"Seleccione una columna para ser el indice de la base de", "igual, esto queda para refresh data base #Comparar columnas dwcorear #quiero guardar una", 
"ya que al guarar listas de columnas solo debo hacer esto una vez", "verbatimFieldName,stdFieldName #iterador para encontrar tuplas verbatimFieldName,stdFieldName for verbatimFieldName in dataframe_columns: for stdFieldName in", "import QtCore as qtc class file_entry(): def __init__(self,route_response_label,route_destiny_label): self.route_response_label=route_response_label self.route_destiny_label=route_destiny_label self.dwc_terms=self.dict_loader() #Diccionario def", "pickle.dump(data.columns.tolist(),f) f.close() listWidget.clear() #return data def dwc_label_checker(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns", "if file_path.endswith('.xlsx') or file_path.endswith('.xls'): data=pd.read_excel(file_path,header=0) elif file_path.endswith('.csv'): data=pd.read_csv(file_path,header=0,sep=';') #ver como variar de ;", "in listWidget.selectedIndexes()] df_selected_visitors_labels=[] i=0 while i <= len(df_columns)-1: if i not in selected_indexes:", "contenga tuplas verbatimFieldName,stdFieldName #iterador para encontrar tuplas verbatimFieldName,stdFieldName for verbatimFieldName in dataframe_columns: for", "import pickle import pandas as pd import easygui as eg from collections import", "podido encontrar la ruta del archivo Excel\") try: data.dropna(axis=1, how='all',inplace=True) except: pass return", "open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns = pickle.load(f) listWidget.addItems(df_columns) return df_columns def visitors_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row()", "matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly) for item in matching_items: item.setSelected(True) return df_columns def dwc_label_transformer(self,listWidget,df_columns):", "listWidget.selectedIndexes()] df_selected_visitors_labels=[] i=0 while i <= len(df_columns)-1: if i not in selected_indexes: df_selected_visitors_labels.append(df_columns[i])", "mejor no ya que al guarar listas de columnas solo debo hacer esto", "except: pass return data def darwinizer(self): #Encuentra match entre el df y el", "data=pd.read_csv(file_path,header=0,sep=';') #ver como variar de ; o , except: print(\"No hemos podido encontrar", "esta predefinido cuando se abra nuevamente #Esta función debe ir en refresh data", "para refresh data base igual, esto queda para refresh data base #Comparar columnas", "encontrar tuplas verbatimFieldName,stdFieldName for verbatimFieldName in dataframe_columns: for stdFieldName in dwc_terms_keys: if verbatimFieldName", "la base de datos\\n Este debe ser un valor unico para cada especimen\"", "darwinizer(self): #Encuentra match entre el df y el diccionario dataframe=self.file_opener() #proveniente de la", "in self.dwc_terms.keys(): not_recommended_labels.append(labels) listWidget.addItems(df_columns) for i in not_recommended_labels: matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly) for", "in self.dwc_terms.get(stdFieldName): darwinizer_list.append((verbatimFieldName,stdFieldName)) #tupla del match return dataframe,darwinizer_list def set_df_index(self,data): columns_df=data.columns.tolist() msg=\"Seleccione una", "dynamic links user info y todos los dwc_terms #Herramienta para normalizar horas #Georreferenciación", "i <= len(df_columns)-1: if i not in selected_indexes: df_selected_dwc_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\"), exist_ok=True) f", 
"self.dwc_terms=self.dict_loader() #Diccionario def dict_loader(self): #Apertura del dict dict_path=r\"documents\\dwc_terms\\dwc_fieldName_dict.pkl\" with open(dict_path , 'rb') as", "eg from collections import defaultdict from PyQt5 import QtCore as qtc class file_entry():", "exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",\"wb\") pickle.dump(data.columns.tolist(),f) f.close() listWidget.clear() #return data def dwc_label_checker(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",", "column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method not proud of it else: pass i=i+1 data=data.rename(columns=column_dict) os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\"),", "visitors_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_visitors_labels=[] i=0 while i <= len(df_columns)-1: if", "seguir quedando en script refresh data base #leer columnas de lista de dwc_file,", "column_dict=defaultdict() selected_indexes=[x.row() for x in listWidget.selectedIndexes()] if not selected_indexes: column_dict=dict(darwinizer_list) else: i=0 while", "que al guarar listas de columnas solo debo hacer esto una vez #Borar", "lista de dwc_file, visitors_file, estas se deben leer para refresh data base igual,", "un valor unico para cada especimen\" title=\"Seleccion\" indexo=eg.choicebox(msg,title,columns_df) data=data.set_index(indexo, drop = True) return", "while i<=len(darwinizer_list)-1: if i not in selected_indexes: column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method not proud", "in matching_items: item.setSelected(True) return df_columns def dwc_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_dwc_labels=[]", "os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\",\"wb\") pickle.dump(column_dict,f) f.close() os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",\"wb\") pickle.dump(data.columns.tolist(),f)", "archivo Excel\") try: data.dropna(axis=1, how='all',inplace=True) except: pass return data def darwinizer(self): #Encuentra match", "variar de ; o , except: print(\"No hemos podido encontrar la ruta del", "base o en darwinizer, # mejor no ya que al guarar listas de", "esto una vez #Borar dynamic links user info y todos los dwc_terms #Herramienta", "file_path.endswith('.xlsx') or file_path.endswith('.xls'): data=pd.read_excel(file_path,header=0) elif file_path.endswith('.csv'): data=pd.read_csv(file_path,header=0,sep=';') #ver como variar de ; o", "from collections import defaultdict from PyQt5 import QtCore as qtc class file_entry(): def", "import pandas as pd import easygui as eg from collections import defaultdict from", "diccionario dataframe=self.file_opener() #proveniente de la funcion file opener dwc_terms_keys=self.dwc_terms.keys() dataframe_columns=dataframe.columns.tolist() darwinizer_list=[] #generar una", "x in listWidget.selectedIndexes()] df_selected_dwc_labels=[] i=0 while i <= len(df_columns)-1: if i not in", "una lista que contenga tuplas verbatimFieldName,stdFieldName #iterador para encontrar tuplas 
verbatimFieldName,stdFieldName for verbatimFieldName", "def visitors_label_filler(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns = pickle.load(f) listWidget.addItems(df_columns) return df_columns", "i<=len(darwinizer_list)-1: if i not in selected_indexes: column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method not proud of", "de columnas solo debo hacer esto una vez #Borar dynamic links user info", "la funcion file opener dwc_terms_keys=self.dwc_terms.keys() dataframe_columns=dataframe.columns.tolist() darwinizer_list=[] #generar una lista que contenga tuplas", "del dict dict_path=r\"documents\\dwc_terms\\dwc_fieldName_dict.pkl\" with open(dict_path , 'rb') as dict_file: return pickle.load(dict_file) def file_opener(self):", "data=pd.read_excel(file_path,header=0) elif file_path.endswith('.csv'): data=pd.read_csv(file_path,header=0,sep=';') #ver como variar de ; o , except: print(\"No", "how='all',inplace=True) except: pass return data def darwinizer(self): #Encuentra match entre el df y", "para cada especimen\" title=\"Seleccion\" indexo=eg.choicebox(msg,title,columns_df) data=data.set_index(indexo, drop = True) return data def dataframe_label_transformer(self,data,listWidget,darwinizer_list):", "archivo, con el index correcto, se borran columnas sin datos try: file_path=self.route_response_label if", "df y el diccionario dataframe=self.file_opener() #proveniente de la funcion file opener dwc_terms_keys=self.dwc_terms.keys() dataframe_columns=dataframe.columns.tolist()", "df_columns = pickle.load(f) not_recommended_labels=[] for labels in df_columns: if labels not in self.dwc_terms.keys():", "de lista de dwc_file, visitors_file, estas se deben leer para refresh data base", "f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\",\"wb\") pickle.dump(df_selected_dwc_labels,f) f.close() listWidget.clear() def visitors_label_filler(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f:", "deben leer para refresh data base igual, esto queda para refresh data base", "base #Comparar columnas dwcorear #quiero guardar una lista con columnas de dwc del", "pass return data def darwinizer(self): #Encuentra match entre el df y el diccionario", "#Comparar columnas dwcorear #quiero guardar una lista con columnas de dwc del archivo", "item.setSelected(True) return df_columns def dwc_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_dwc_labels=[] i=0 while", "selected_indexes: column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method not proud of it else: pass i=i+1 data=data.rename(columns=column_dict)", "data base o en darwinizer, # mejor no ya que al guarar listas", "for item in matching_items: item.setSelected(True) return df_columns def dwc_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in", "#ver como variar de ; o , except: print(\"No hemos podido encontrar la", "en refresh data base o en darwinizer, # mejor no ya que al", "dataframe_columns: for stdFieldName in dwc_terms_keys: if verbatimFieldName in self.dwc_terms.get(stdFieldName): darwinizer_list.append((verbatimFieldName,stdFieldName)) #tupla del match", "file_opener(self): #Apertura del archivo, con el index correcto, se borran columnas sin datos", "else: i=0 while i<=len(darwinizer_list)-1: if i not in selected_indexes: 
column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method", "tuplas verbatimFieldName,stdFieldName #iterador para encontrar tuplas verbatimFieldName,stdFieldName for verbatimFieldName in dataframe_columns: for stdFieldName", "pandas as pd import easygui as eg from collections import defaultdict from PyQt5", "dataframe_columns=dataframe.columns.tolist() darwinizer_list=[] #generar una lista que contenga tuplas verbatimFieldName,stdFieldName #iterador para encontrar tuplas", "el df y el diccionario dataframe=self.file_opener() #proveniente de la funcion file opener dwc_terms_keys=self.dwc_terms.keys()", "x in listWidget.selectedIndexes()] df_selected_visitors_labels=[] i=0 while i <= len(df_columns)-1: if i not in", "#Diccionario def dict_loader(self): #Apertura del dict dict_path=r\"documents\\dwc_terms\\dwc_fieldName_dict.pkl\" with open(dict_path , 'rb') as dict_file:", "opener dwc_terms_keys=self.dwc_terms.keys() dataframe_columns=dataframe.columns.tolist() darwinizer_list=[] #generar una lista que contenga tuplas verbatimFieldName,stdFieldName #iterador para", "listWidget.clear() #return data def dwc_label_checker(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns = pickle.load(f)", "in listWidget.selectedIndexes()] df_selected_dwc_labels=[] i=0 while i <= len(df_columns)-1: if i not in selected_indexes:", "df_selected_visitors_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\",\"wb\") pickle.dump(df_selected_visitors_labels,f) f.close() listWidget.clear() def sensitive_data(self): pass", "datos try: file_path=self.route_response_label if file_path.endswith('.xlsx') or file_path.endswith('.xls'): data=pd.read_excel(file_path,header=0) elif file_path.endswith('.csv'): data=pd.read_csv(file_path,header=0,sep=';') #ver como", "title=\"Seleccion\" indexo=eg.choicebox(msg,title,columns_df) data=data.set_index(indexo, drop = True) return data def dataframe_label_transformer(self,data,listWidget,darwinizer_list): column_dict=defaultdict() selected_indexes=[x.row() for", "for i in not_recommended_labels: matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly) for item in matching_items: item.setSelected(True)", "column_dict=dict(darwinizer_list) else: i=0 while i<=len(darwinizer_list)-1: if i not in selected_indexes: column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this", "file opener dwc_terms_keys=self.dwc_terms.keys() dataframe_columns=dataframe.columns.tolist() darwinizer_list=[] #generar una lista que contenga tuplas verbatimFieldName,stdFieldName #iterador", "cuando se abra nuevamente #Esta función debe ir en refresh data base o", "dwc_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_dwc_labels=[] i=0 while i <= len(df_columns)-1: if", "ya esta predefinido cuando se abra nuevamente #Esta función debe ir en refresh", "import easygui as eg from collections import defaultdict from PyQt5 import QtCore as", "#Visitors va a seguir quedando en script refresh data base #leer columnas de", "dwc_terms_keys=self.dwc_terms.keys() dataframe_columns=dataframe.columns.tolist() darwinizer_list=[] #generar una lista que contenga tuplas verbatimFieldName,stdFieldName #iterador para encontrar", "pickle.dump(df_selected_visitors_labels,f) 
f.close() listWidget.clear() def sensitive_data(self): pass #Visitors va a seguir quedando en script", "sensitive_data(self): pass #Visitors va a seguir quedando en script refresh data base #leer", "in df_columns: if labels not in self.dwc_terms.keys(): not_recommended_labels.append(labels) listWidget.addItems(df_columns) for i in not_recommended_labels:", "def dwc_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_dwc_labels=[] i=0 while i <= len(df_columns)-1:", "def file_opener(self): #Apertura del archivo, con el index correcto, se borran columnas sin", "del archivo, con el index correcto, se borran columnas sin datos try: file_path=self.route_response_label", "exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\",\"wb\") pickle.dump(df_selected_dwc_labels,f) f.close() listWidget.clear() def visitors_label_filler(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as", "queda para refresh data base #Comparar columnas dwcorear #quiero guardar una lista con", "visitor # así ya esta predefinido cuando se abra nuevamente #Esta función debe", "se borran columnas sin datos try: file_path=self.route_response_label if file_path.endswith('.xlsx') or file_path.endswith('.xls'): data=pd.read_excel(file_path,header=0) elif", "valor unico para cada especimen\" title=\"Seleccion\" indexo=eg.choicebox(msg,title,columns_df) data=data.set_index(indexo, drop = True) return data", "; o , except: print(\"No hemos podido encontrar la ruta del archivo Excel\")", "return data def darwinizer(self): #Encuentra match entre el df y el diccionario dataframe=self.file_opener()", "#iterador para encontrar tuplas verbatimFieldName,stdFieldName for verbatimFieldName in dataframe_columns: for stdFieldName in dwc_terms_keys:", "selected_indexes=[x.row() for x in listWidget.selectedIndexes()] if not selected_indexes: column_dict=dict(darwinizer_list) else: i=0 while i<=len(darwinizer_list)-1:", "los dwc_terms #Herramienta para normalizar horas #Georreferenciación #Guardar df como csv y ese", "def set_df_index(self,data): columns_df=data.columns.tolist() msg=\"Seleccione una columna para ser el indice de la base", "df_columns: if labels not in self.dwc_terms.keys(): not_recommended_labels.append(labels) listWidget.addItems(df_columns) for i in not_recommended_labels: matching_items", "Este debe ser un valor unico para cada especimen\" title=\"Seleccion\" indexo=eg.choicebox(msg,title,columns_df) data=data.set_index(indexo, drop", "pickle.load(f) listWidget.addItems(df_columns) return df_columns def visitors_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_visitors_labels=[] i=0", "df_selected_dwc_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\",\"wb\") pickle.dump(df_selected_dwc_labels,f) f.close() listWidget.clear() def visitors_label_filler(self,listWidget): with", "darwinizer_list=[] #generar una lista que contenga tuplas verbatimFieldName,stdFieldName #iterador para encontrar tuplas verbatimFieldName,stdFieldName", "datos\\n Este debe ser un valor unico para cada especimen\" title=\"Seleccion\" indexo=eg.choicebox(msg,title,columns_df) data=data.set_index(indexo,", "QtCore as qtc class file_entry(): def 
__init__(self,route_response_label,route_destiny_label): self.route_response_label=route_response_label self.route_destiny_label=route_destiny_label self.dwc_terms=self.dict_loader() #Diccionario def dict_loader(self):", "lista que contenga tuplas verbatimFieldName,stdFieldName #iterador para encontrar tuplas verbatimFieldName,stdFieldName for verbatimFieldName in", "in selected_indexes: column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method not proud of it else: pass i=i+1", "not_recommended_labels.append(labels) listWidget.addItems(df_columns) for i in not_recommended_labels: matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly) for item in", "df_selected_visitors_labels=[] i=0 while i <= len(df_columns)-1: if i not in selected_indexes: df_selected_visitors_labels.append(df_columns[i]) i=i+1", "or file_path.endswith('.xls'): data=pd.read_excel(file_path,header=0) elif file_path.endswith('.csv'): data=pd.read_csv(file_path,header=0,sep=';') #ver como variar de ; o ,", "pass i=i+1 data=data.rename(columns=column_dict) os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\",\"wb\") pickle.dump(column_dict,f) f.close() os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"), exist_ok=True) f", "len(df_columns)-1: if i not in selected_indexes: df_selected_visitors_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\",\"wb\")", "<= len(df_columns)-1: if i not in selected_indexes: df_selected_visitors_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\"), exist_ok=True) f =", "script refresh data base #leer columnas de lista de dwc_file, visitors_file, estas se", "in dwc_terms_keys: if verbatimFieldName in self.dwc_terms.get(stdFieldName): darwinizer_list.append((verbatimFieldName,stdFieldName)) #tupla del match return dataframe,darwinizer_list def", "indexo=eg.choicebox(msg,title,columns_df) data=data.set_index(indexo, drop = True) return data def dataframe_label_transformer(self,data,listWidget,darwinizer_list): column_dict=defaultdict() selected_indexes=[x.row() for x", "coding: utf-8 -*- import os import pickle import pandas as pd import easygui", "de la base de datos\\n Este debe ser un valor unico para cada", "utf-8 -*- import os import pickle import pandas as pd import easygui as", "#tupla del match return dataframe,darwinizer_list def set_df_index(self,data): columns_df=data.columns.tolist() msg=\"Seleccione una columna para ser", "with open(dict_path , 'rb') as dict_file: return pickle.load(dict_file) def file_opener(self): #Apertura del archivo,", "open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\",\"wb\") pickle.dump(df_selected_visitors_labels,f) f.close() listWidget.clear() def sensitive_data(self): pass #Visitors va a seguir quedando en", "True) return data def dataframe_label_transformer(self,data,listWidget,darwinizer_list): column_dict=defaultdict() selected_indexes=[x.row() for x in listWidget.selectedIndexes()] if not", "not selected_indexes: column_dict=dict(darwinizer_list) else: i=0 while i<=len(darwinizer_list)-1: if i not in selected_indexes: 
column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1]", "def dwc_label_checker(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns = pickle.load(f) not_recommended_labels=[] for labels", "if not selected_indexes: column_dict=dict(darwinizer_list) else: i=0 while i<=len(darwinizer_list)-1: if i not in selected_indexes:", "del archivo Excel\") try: data.dropna(axis=1, how='all',inplace=True) except: pass return data def darwinizer(self): #Encuentra", "= listWidget.findItems(i, qtc.Qt.MatchExactly) for item in matching_items: item.setSelected(True) return df_columns def dwc_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row()", "data base #leer columnas de lista de dwc_file, visitors_file, estas se deben leer", "= open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\",\"wb\") pickle.dump(df_selected_visitors_labels,f) f.close() listWidget.clear() def sensitive_data(self): pass #Visitors va a seguir quedando", "pickle import pandas as pd import easygui as eg from collections import defaultdict", "refresh data base igual, esto queda para refresh data base #Comparar columnas dwcorear", "as f: df_columns = pickle.load(f) listWidget.addItems(df_columns) return df_columns def visitors_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x", "y el diccionario dataframe=self.file_opener() #proveniente de la funcion file opener dwc_terms_keys=self.dwc_terms.keys() dataframe_columns=dataframe.columns.tolist() darwinizer_list=[]", "of it else: pass i=i+1 data=data.rename(columns=column_dict) os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\",\"wb\") pickle.dump(column_dict,f) f.close()", "hemos podido encontrar la ruta del archivo Excel\") try: data.dropna(axis=1, how='all',inplace=True) except: pass", "open(dict_path , 'rb') as dict_file: return pickle.load(dict_file) def file_opener(self): #Apertura del archivo, con", "as pd import easygui as eg from collections import defaultdict from PyQt5 import", "if i not in selected_indexes: df_selected_visitors_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\",\"wb\") pickle.dump(df_selected_visitors_labels,f)", "no ya que al guarar listas de columnas solo debo hacer esto una", "i=i+1 data=data.rename(columns=column_dict) os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\",\"wb\") pickle.dump(column_dict,f) f.close() os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"), exist_ok=True) f =", "o , except: print(\"No hemos podido encontrar la ruta del archivo Excel\") try:", "listWidget.clear() def sensitive_data(self): pass #Visitors va a seguir quedando en script refresh data", "debe ir en refresh data base o en darwinizer, # mejor no ya", "'rb') as f: df_columns = pickle.load(f) listWidget.addItems(df_columns) return df_columns def visitors_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for", "refresh data base o en darwinizer, # mejor no ya que al guarar", "data base igual, esto queda 
para refresh data base #Comparar columnas dwcorear #quiero", "try: data.dropna(axis=1, how='all',inplace=True) except: pass return data def darwinizer(self): #Encuentra match entre el", "ir en refresh data base o en darwinizer, # mejor no ya que", "while i <= len(df_columns)-1: if i not in selected_indexes: df_selected_visitors_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\"), exist_ok=True)", "verbatimFieldName,stdFieldName for verbatimFieldName in dataframe_columns: for stdFieldName in dwc_terms_keys: if verbatimFieldName in self.dwc_terms.get(stdFieldName):", "f.close() os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",\"wb\") pickle.dump(data.columns.tolist(),f) f.close() listWidget.clear() #return data def dwc_label_checker(self,listWidget):", "como variar de ; o , except: print(\"No hemos podido encontrar la ruta", "columnas de lista de dwc_file, visitors_file, estas se deben leer para refresh data", "dataframe_label_transformer(self,data,listWidget,darwinizer_list): column_dict=defaultdict() selected_indexes=[x.row() for x in listWidget.selectedIndexes()] if not selected_indexes: column_dict=dict(darwinizer_list) else: i=0", "return df_columns def visitors_label_transformer(self,listWidget,df_columns): selected_indexes=[x.row() for x in listWidget.selectedIndexes()] df_selected_visitors_labels=[] i=0 while i", "selected_indexes: column_dict=dict(darwinizer_list) else: i=0 while i<=len(darwinizer_list)-1: if i not in selected_indexes: column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix", "collections import defaultdict from PyQt5 import QtCore as qtc class file_entry(): def __init__(self,route_response_label,route_destiny_label):", "info y todos los dwc_terms #Herramienta para normalizar horas #Georreferenciación #Guardar df como", "match entre el df y el diccionario dataframe=self.file_opener() #proveniente de la funcion file", "while i <= len(df_columns)-1: if i not in selected_indexes: df_selected_dwc_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\"), exist_ok=True)", "this method not proud of it else: pass i=i+1 data=data.rename(columns=column_dict) os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\"), exist_ok=True) f", "que contenga tuplas verbatimFieldName,stdFieldName #iterador para encontrar tuplas verbatimFieldName,stdFieldName for verbatimFieldName in dataframe_columns:", "it else: pass i=i+1 data=data.rename(columns=column_dict) os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\",\"wb\") pickle.dump(column_dict,f) f.close() os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\"),", "solo debo hacer esto una vez #Borar dynamic links user info y todos", "función debe ir en refresh data base o en darwinizer, # mejor no", "refresh data base #leer columnas de lista de dwc_file, visitors_file, estas se deben", "la ruta del archivo Excel\") try: data.dropna(axis=1, how='all',inplace=True) except: pass return data def", "exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\",\"wb\") 
pickle.dump(df_selected_visitors_labels,f) f.close() listWidget.clear() def sensitive_data(self): pass #Visitors va a", "vez #Borar dynamic links user info y todos los dwc_terms #Herramienta para normalizar", "if i not in selected_indexes: df_selected_dwc_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\",\"wb\") pickle.dump(df_selected_dwc_labels,f)", "dict dict_path=r\"documents\\dwc_terms\\dwc_fieldName_dict.pkl\" with open(dict_path , 'rb') as dict_file: return pickle.load(dict_file) def file_opener(self): #Apertura", "labels not in self.dwc_terms.keys(): not_recommended_labels.append(labels) listWidget.addItems(df_columns) for i in not_recommended_labels: matching_items = listWidget.findItems(i,", "-*- coding: utf-8 -*- import os import pickle import pandas as pd import", "not in selected_indexes: df_selected_visitors_labels.append(df_columns[i]) i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_visitors_labels.pkl\",\"wb\") pickle.dump(df_selected_visitors_labels,f) f.close() listWidget.clear()", "#Fix this method not proud of it else: pass i=i+1 data=data.rename(columns=column_dict) os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_column_dict_rename.pkl\"), exist_ok=True)", "lista con columnas de dwc del archivo y otras para visitor # así", "pickle.load(f) not_recommended_labels=[] for labels in df_columns: if labels not in self.dwc_terms.keys(): not_recommended_labels.append(labels) listWidget.addItems(df_columns)", "i=i+1 os.makedirs(os.path.dirname(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\"), exist_ok=True) f = open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\",\"wb\") pickle.dump(df_selected_dwc_labels,f) f.close() listWidget.clear() def visitors_label_filler(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\",", "Excel\") try: data.dropna(axis=1, how='all',inplace=True) except: pass return data def darwinizer(self): #Encuentra match entre", "f.close() listWidget.clear() #return data def dwc_label_checker(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns =", "= open(f\"{self.route_destiny_label}\\dwc_terms\\df_selected_dwc_labels.pkl\",\"wb\") pickle.dump(df_selected_dwc_labels,f) f.close() listWidget.clear() def visitors_label_filler(self,listWidget): with open(f\"{self.route_destiny_label}\\dwc_terms\\df_columns_renamed.pkl\", 'rb') as f: df_columns", "not in self.dwc_terms.keys(): not_recommended_labels.append(labels) listWidget.addItems(df_columns) for i in not_recommended_labels: matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly)", "class file_entry(): def __init__(self,route_response_label,route_destiny_label): self.route_response_label=route_response_label self.route_destiny_label=route_destiny_label self.dwc_terms=self.dict_loader() #Diccionario def dict_loader(self): #Apertura del dict", "i=0 while i<=len(darwinizer_list)-1: if i not in selected_indexes: column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method not", "as eg from collections import defaultdict from PyQt5 import QtCore as qtc class", "funcion file opener 
# -*- coding: utf-8 -*-
import os
import pickle
import pandas as pd
import easygui as eg  # assumed: 'eg' (used by eg.choicebox below) is easygui; this import is not in the recovered fragments
from collections import defaultdict
from PyQt5 import QtCore as qtc


class file_entry():
    def __init__(self, route_response_label, route_destiny_label):
        self.route_response_label = route_response_label
        self.route_destiny_label = route_destiny_label
        self.dwc_terms = self.dict_loader()  # Dictionary

    def dict_loader(self):
        # Open the dictionary of Darwin Core terms
        dict_path = r"documents\dwc_terms\dwc_fieldName_dict.pkl"
        with open(dict_path, 'rb') as dict_file:
            return pickle.load(dict_file)

    def file_opener(self):
        # Open the file with the correct index; columns without data are dropped
        try:
            file_path = self.route_response_label
            if file_path.endswith('.xlsx') or file_path.endswith('.xls'):
                data = pd.read_excel(file_path, header=0)
            elif file_path.endswith('.csv'):
                data = pd.read_csv(file_path, header=0, sep=';')  # TODO: see how to switch between ';' and ','
        except:
            print("Could not find the path to the Excel file")
        try:
            data.dropna(axis=1, how='all', inplace=True)
        except:
            pass
        return data

    def darwinizer(self):
        # Find matches between the df and the dictionary
        dataframe = self.file_opener()  # comes from the file_opener function
        dwc_terms_keys = self.dwc_terms.keys()
        dataframe_columns = dataframe.columns.tolist()
        darwinizer_list = []  # build a list of (verbatimFieldName, stdFieldName) tuples
        for verbatimFieldName in dataframe_columns:
            for stdFieldName in dwc_terms_keys:
                if verbatimFieldName in self.dwc_terms.get(stdFieldName):
                    darwinizer_list.append((verbatimFieldName, stdFieldName))  # matched tuple
        return dataframe, darwinizer_list

    def set_df_index(self, data):
        columns_df = data.columns.tolist()
        msg = ("Select a column to be the index of the database\n"
               "It must be a unique value for each specimen")
        title = "Selection"
        indexo = eg.choicebox(msg, title, columns_df)
        data = data.set_index(indexo, drop=True)
        return data

    def dataframe_label_transformer(self, data, listWidget, darwinizer_list):
        column_dict = defaultdict()
        selected_indexes = [x.row() for x in listWidget.selectedIndexes()]
        if not selected_indexes:
            column_dict = dict(darwinizer_list)
        else:
            # Only partially recoverable from the fragments; reconstructed by
            # analogy with the *_label_transformer methods below (the original
            # comment here ends "...not proud of it")
            i = 0
            while i <= len(darwinizer_list) - 1:
                if i in selected_indexes:
                    column_dict[darwinizer_list[i][0]] = darwinizer_list[i][1]
                else:
                    pass
                i = i + 1
        data = data.rename(columns=column_dict)
        os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_column_dict_rename.pkl"), exist_ok=True)
        f = open(f"{self.route_destiny_label}\dwc_terms\df_column_dict_rename.pkl", "wb")
        pickle.dump(column_dict, f)
        f.close()
        os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl"), exist_ok=True)
        f = open(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl", "wb")
        pickle.dump(data.columns.tolist(), f)
        f.close()
        listWidget.clear()
        # return data

    def dwc_label_checker(self, listWidget):
        with open(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl", 'rb') as f:
            df_columns = pickle.load(f)
        not_recommended_labels = []
        for labels in df_columns:
            if labels not in self.dwc_terms.keys():
                not_recommended_labels.append(labels)
        listWidget.addItems(df_columns)
        for i in not_recommended_labels:
            matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly)
            for item in matching_items:
                item.setSelected(True)
        return df_columns

    def dwc_label_transformer(self, listWidget, df_columns):
        selected_indexes = [x.row() for x in listWidget.selectedIndexes()]
        df_selected_dwc_labels = []
        i = 0
        while i <= len(df_columns) - 1:
            if i not in selected_indexes:
                df_selected_dwc_labels.append(df_columns[i])
            i = i + 1
        os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_selected_dwc_labels.pkl"), exist_ok=True)
        f = open(f"{self.route_destiny_label}\dwc_terms\df_selected_dwc_labels.pkl", "wb")
        pickle.dump(df_selected_dwc_labels, f)
        f.close()
        listWidget.clear()

    def visitors_label_filler(self, listWidget):
        with open(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl", 'rb') as f:
            df_columns = pickle.load(f)
        listWidget.addItems(df_columns)
        return df_columns

    def visitors_label_transformer(self, listWidget, df_columns):
        selected_indexes = [x.row() for x in listWidget.selectedIndexes()]
        df_selected_visitors_labels = []
        i = 0
        while i <= len(df_columns) - 1:
            if i not in selected_indexes:
                df_selected_visitors_labels.append(df_columns[i])
            i = i + 1
        os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_selected_visitors_labels.pkl"), exist_ok=True)
        f = open(f"{self.route_destiny_label}\dwc_terms\df_selected_visitors_labels.pkl", "wb")
        pickle.dump(df_selected_visitors_labels, f)
        f.close()
        listWidget.clear()

    def sensitive_data(self):
        pass
        # Visitors will keep living in the refresh-data-base script
        # Read the column lists for dwc_file and visitors_file; they must be read to refresh the data base
        # Compare columns / darwinize them
        # I want to save one list with the file's dwc columns and another for visitors;
        # that stays in darwinizer -- better not, since saving the column lists only has to be done once
        # Delete dynamic links, user info and all the dwc_terms
        # Tool to normalize times
        # Georeferencing
        # Save df
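# ----------------------------------------------------------------------
# Added usage sketch (not part of the recovered source): a minimal,
# self-contained illustration of the matching rule in file_entry.darwinizer.
# The dictionary below is hypothetical; its shape (each Darwin Core standard
# field name mapping to a collection of verbatim spellings) is inferred from
# the membership test `verbatimFieldName in self.dwc_terms.get(stdFieldName)`.
dwc_terms = {
    'scientificName': ['nombre cientifico', 'especie', 'scientific name'],
    'recordedBy': ['colector', 'collector'],
}
dataframe_columns = ['especie', 'colector', 'fecha']

darwinizer_list = []
for verbatimFieldName in dataframe_columns:
    for stdFieldName in dwc_terms.keys():
        if verbatimFieldName in dwc_terms.get(stdFieldName):
            darwinizer_list.append((verbatimFieldName, stdFieldName))

print(darwinizer_list)
# -> [('especie', 'scientificName'), ('colector', 'recordedBy')]
# 'fecha' yields no tuple: unmatched columns are left for the user to map by
# hand in the Qt list widgets driven by the *_label_transformer methods.
# ----------------------------------------------------------------------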
<reponame>arimakaoru/CameraSystem
#!/usr/bin/env python
# coding: utf-8

# Definitions related to color
class Color():
    # Color codes
    UNKNOWN = 0
    RED = 1
    GREEN = 2
    BLUE = 3
    BLACK = 4
    YELLOW = 5

# Convert a color code to a display string
def toColorName(code):
    COLOR_NAME = {
        Color.UNKNOWN: "Unknown",
        Color.RED: "RED",
        Color.GREEN: "GREEN",
        Color.BLUE: "BLUE",
        Color.BLACK: "BLACK",
        Color.YELLOW: "YELLOW"
    }
    return COLOR_NAME[code]

# Convert an HSV value to a color code (please tune the thresholds to your environment)
def getColor(hsv):
    if (0 <= hsv[2] and hsv[2] <= 20):
        return Color.BLACK
    if (0 <= hsv[0] and hsv[0] <= 15) \
            and (20 <= hsv[1] and hsv[1] <= 255) \
            and (20 <= hsv[2] and hsv[2] <= 255):
        return Color.RED
    if (100 <= hsv[0] and hsv[0] <= 115) \
            and (60 <= hsv[1] and hsv[1] <= 255) \
            and (60 <= hsv[2] and hsv[2] <= 255):
        return Color.BLUE
    if (45 <= hsv[0] and hsv[0] <= 90) \
            and (50 <= hsv[1] and hsv[1] <= 255) \
            and (50 <= hsv[2] and hsv[2] <= 255):
        return Color.GREEN
    if (20 <= hsv[0] and hsv[0] <= 30) \
            and (20 <= hsv[1] and hsv[1] <= 255) \
            and (20 <= hsv[2] and hsv[2] <= 255):
        return Color.YELLOW
    return Color.UNKNOWN

# Command code definitions
class CommandCode():
    Specific = 0x01
    All = 0x02

# Response code definitions
class ResponseCode():
    Color = 0x51
    Error = 0xC8
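# ----------------------------------------------------------------------
# Added sanity check (not part of the recovered source): OpenCV-style HSV
# triplets run through getColor/toColorName. The (H, S, V) ranges assumed
# here (H in 0-179, S and V in 0-255) are implied by the thresholds above.
samples = [
    (5, 200, 200),    # inside the RED window (H 0-15)
    (110, 120, 120),  # inside the BLUE window (H 100-115)
    (60, 100, 100),   # inside the GREEN window (H 45-90)
    (25, 100, 100),   # inside the YELLOW window (H 20-30)
    (0, 0, 10),       # V <= 20 is classified BLACK before any hue test
    (170, 30, 200),   # matches no window -> UNKNOWN
]
for hsv in samples:
    print(hsv, '->', toColorName(getColor(hsv)))
# ----------------------------------------------------------------------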
import numpy as np
import matplotlib.pyplot as pl
import h5py
import platform
import os
import json
import sys
import argparse
import scipy.ndimage as nd
import pickle
import scipy.io as io
from ipdb import set_trace as stop

if (platform.node() == 'viga'):
    os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cpu,floatX=float32"

os.environ["KERAS_BACKEND"] = "tensorflow"

if (platform.node() != 'viga'):
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

from keras.layers import Input, Dense, Convolution2D, Flatten, merge, MaxPooling2D, UpSampling2D, Cropping2D, Deconvolution2D, Activation
from keras.callbacks import ModelCheckpoint, Callback
from keras.models import Model, model_from_json
from keras.utils.visualize_util import plot as kerasPlot
import keras.optimizers
from keras.utils import np_utils

def running_mean(x, N):
    cumsum = np.cumsum(np.insert(x, 0, 0))
    return (cumsum[N:] - cumsum[:-N]) / N

class LossHistory(Callback):
    def __init__(self, root, losses):
        self.root = root
        self.losses = losses

    def on_epoch_end(self, batch, logs={}):
        self.losses.append(logs)
        with open("{0}_loss.json".format(self.root), 'w') as f:
            json.dump(self.losses, f)

    def finalize(self):
        pass

class trainDNNFull(object):
    def __init__(self, root):
        self.root = root
        self.nx = 50
        self.ny = 50
        self.n_times = 2
        self.n_filters = 64
        self.batch_size = 32
        self.n_conv_layers = 10
        self.stride = 1
        self.skip_frequency = 2

    def readNetwork(self):
        print("Reading previous network...")
        f = open('{0}_model.json'.format(self.root), 'r')
        json_string = f.read()
        f.close()
        json_string = json_string.replace('"output_shape": [null', '"output_shape": [%d' % 32)
        self.model = model_from_json(json_string)
        self.model.load_weights("{0}_weights.hdf5".format(self.root))

    def defineNetwork(self):
        conv = [None] * self.n_conv_layers
        deconv = [None] * self.n_conv_layers

        inputs = Input(shape=(self.nx, self.ny, self.n_times))
        conv[0] = Convolution2D(self.n_filters, 3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(inputs)
        for i in range(self.n_conv_layers-1):
            conv[i+1] = Convolution2D(self.n_filters, 3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[i])

        deconv[0] = Deconvolution2D(self.n_filters, 3, 3, activation='relu', output_shape=(self.batch_size, self.nx, self.ny, self.n_filters), subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[-1])
        for i in range(self.n_conv_layers-1):
            if (i % self.skip_frequency == 0):
                x = Deconvolution2D(self.n_filters, 3, 3, output_shape=(self.batch_size, self.nx, self.ny, self.n_filters), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i])
                x = merge([conv[self.n_conv_layers-i-2], x], mode='sum')
                deconv[i+1] = Activation('relu')(x)
            else:
                deconv[i+1] = Deconvolution2D(self.n_filters, 3, 3, output_shape=(self.batch_size, self.nx, self.ny, self.n_filters), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i])

        final = Deconvolution2D(6, 1, 1, output_shape=(self.batch_size, self.nx, self.ny, 6), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[-1])

        self.model = Model(input=inputs, output=final)
        self.model.load_weights("{0}_weights.hdf5".format(self.root))

    def validation_generator(self):
        f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r')
        images = f_images.get("intensity")
        while 1:
            for i in range(1):
                input_validation = images[i:i+self.batch_size,:,:,:].astype('float32')
                yield input_validation
        f_images.close()

    def predict_validation(self):
        print("Predicting validation data...")
        f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r')
        f_velocity = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity_validation.h5', 'r')
        im = f_images.get("intensity")[:]
        v = f_velocity.get("velocity")[:]
        im = im.astype('float32')
        out = self.model.predict_generator(self.validation_generator(), 32)
        pl.close('all')

        minv = 0.0
        maxv = 1.0
        np.random.seed(123)
        index = np.random.permutation(30)
        label = [1, 0.1, 0.01]
        for loop in range(3):
            f, ax = pl.subplots(nrows=3, ncols=5, figsize=(18,10))
            for ind in range(3):
                minv = np.min([v[index[ind],:,:,2*loop], v[index[ind],:,:,2*loop+1]])
                maxv = np.max([v[index[ind],:,:,2*loop], v[index[ind],:,:,2*loop+1]])
                res = ax[ind,0].imshow(im[index[ind],:,:,0])
                pl.colorbar(res, ax=ax[ind,0])
                res = ax[ind,1].imshow(v[index[ind],:,:,2*loop], vmin=minv, vmax=maxv)
                pl.colorbar(res, ax=ax[ind,1])
                res = ax[ind,2].imshow(v[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv)
                pl.colorbar(res, ax=ax[ind,2])
                res = ax[ind,3].imshow(out[index[ind],:,:,2*loop], vmin=minv, vmax=maxv)
                pl.colorbar(res, ax=ax[ind,3])
                res = ax[ind,4].imshow(out[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv)
                pl.colorbar(res, ax=ax[ind,4])
                ax[ind,1].set_title(r'vx ($\tau$={0})'.format(label[loop]))
                ax[ind,2].set_title(r'vy ($\tau$={0})'.format(label[loop]))
                ax[ind,3].set_title(r'vx(CNN) ($\tau$={0})'.format(label[loop]))
                ax[ind,4].set_title(r'vy(CNN) ($\tau$={0})'.format(label[loop]))
            ax[0,0].set_title('Time 1')
            ax[1,0].set_title('Time 2')
            ax[2,0].set_title('Time 3')
            pl.tight_layout()
            pl.show()
            pl.savefig("{0}_prediction_tau_{1}.png".format(self.root, label[loop]))
        stop()

if (__name__ == '__main__'):
    out = trainDNNFull('cnns/test')
    out.defineNetwork()
    # (the recovered fragment list is truncated here)
ModelCheckpoint, Callback from keras.models import Model, model_from_json from keras.utils.visualize_util import plot", "<filename>opticalFlow/deepvel/training/testFConv.py import numpy as np import matplotlib.pyplot as pl import h5py import platform", "keras.optimizers from keras.utils import np_utils def running_mean(x, N): cumsum = np.cumsum(np.insert(x, 0, 0))", "[None] * self.n_conv_layers inputs = Input(shape=(self.nx, self.ny, self.n_times)) conv[0] = Convolution2D(self.n_filters, 3, 3,", "ipdb import set_trace as stop if (platform.node() == 'viga'): os.environ[\"THEANO_FLAGS\"] = \"mode=FAST_RUN,device=cpu,floatX=float32\" os.environ[\"KERAS_BACKEND\"]", "= ax[ind,4].imshow(out[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,4]) ax[ind,1].set_title(r'vx ($\\tau$={0})'.format(label[loop])) ax[ind,2].set_title(r'vy ($\\tau$={0})'.format(label[loop])) ax[ind,3].set_title(r'vx(CNN) ($\\tau$={0})'.format(label[loop])) ax[ind,4].set_title(r'vy(CNN)", "losses def on_epoch_end(self, batch, logs={}): self.losses.append(logs) with open(\"{0}_loss.json\".format(self.root), 'w') as f: json.dump(self.losses, f)", "as f: json.dump(self.losses, f) def finalize(self): pass class trainDNNFull(object): def __init__(self, root): self.root", "Model(input=inputs, output=final) self.model.load_weights(\"{0}_weights.hdf5\".format(self.root)) def validation_generator(self): f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r') images = f_images.get(\"intensity\") while", "if (platform.node() != 'viga'): os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" from keras.layers import Input, Dense, Convolution2D,", "sys import argparse import scipy.ndimage as nd import pickle import scipy.io as io", "= root self.losses = losses def on_epoch_end(self, batch, logs={}): self.losses.append(logs) with open(\"{0}_loss.json\".format(self.root), 'w')", "open(\"{0}_loss.json\".format(self.root), 'w') as f: json.dump(self.losses, f) def finalize(self): pass class trainDNNFull(object): def __init__(self,", "= ax[ind,3].imshow(out[index[ind],:,:,2*loop], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,3]) res = ax[ind,4].imshow(out[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,4])", "($\\tau$={0})'.format(label[loop])) ax[ind,4].set_title(r'vy(CNN) ($\\tau$={0})'.format(label[loop])) ax[0,0].set_title('Time 1') ax[1,0].set_title('Time 2') ax[2,0].set_title('Time 3') pl.tight_layout() pl.show() pl.savefig(\"{0}_prediction_tau_{1}.png\".format(self.root, label[loop]))", "os import json import sys import argparse import scipy.ndimage as nd import pickle", "self.ny, 6), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[-1]) self.model = Model(input=inputs, output=final) self.model.load_weights(\"{0}_weights.hdf5\".format(self.root)) def validation_generator(self):", "argparse import scipy.ndimage as nd import pickle import scipy.io as io from ipdb", "os.environ[\"KERAS_BACKEND\"] = \"tensorflow\" if (platform.node() != 'viga'): os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" from keras.layers import", "readNetwork(self): print(\"Reading previous network...\") f = open('{0}_model.json'.format(self.root), 'r') json_string = f.read() f.close() json_string", "import platform import os import json import sys import argparse import scipy.ndimage as", "x], mode='sum') deconv[i+1] = Activation('relu')(x) else: deconv[i+1] = Deconvolution2D(self.n_filters, 3, 3, 
output_shape=(self.batch_size,self.nx, self.ny,self.n_filters),", "self.ny,self.n_filters), subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[-1]) for i in range(self.n_conv_layers-1): if (i % self.skip_frequency ==", "[1, 0.1, 0.01] for loop in range(3): f, ax = pl.subplots(nrows=3, ncols=5, figsize=(18,10))", "1 self.skip_frequency = 2 def readNetwork(self): print(\"Reading previous network...\") f = open('{0}_model.json'.format(self.root), 'r')", "scipy.io as io from ipdb import set_trace as stop if (platform.node() == 'viga'):", "1, 1, output_shape=(self.batch_size,self.nx, self.ny, 6), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[-1]) self.model = Model(input=inputs, output=final)", "\"mode=FAST_RUN,device=cpu,floatX=float32\" os.environ[\"KERAS_BACKEND\"] = \"tensorflow\" if (platform.node() != 'viga'): os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" from keras.layers", "vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,3]) res = ax[ind,4].imshow(out[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,4]) ax[ind,1].set_title(r'vx ($\\tau$={0})'.format(label[loop]))", "on_epoch_end(self, batch, logs={}): self.losses.append(logs) with open(\"{0}_loss.json\".format(self.root), 'w') as f: json.dump(self.losses, f) def finalize(self):", "open('{0}_model.json'.format(self.root), 'r') json_string = f.read() f.close() json_string = json_string.replace('\"output_shape\": [null', '\"output_shape\": [%d' %", "f, ax = pl.subplots(nrows=3, ncols=5, figsize=(18,10)) for ind in range(3): minv = np.min([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]])", "io from ipdb import set_trace as stop if (platform.node() == 'viga'): os.environ[\"THEANO_FLAGS\"] =", "import scipy.ndimage as nd import pickle import scipy.io as io from ipdb import", "self.model = model_from_json(json_string) self.model.load_weights(\"{0}_weights.hdf5\".format(self.root)) def defineNetwork(self): conv = [None] * self.n_conv_layers deconv =", "self.losses.append(logs) with open(\"{0}_loss.json\".format(self.root), 'w') as f: json.dump(self.losses, f) def finalize(self): pass class trainDNNFull(object):", "Convolution2D(self.n_filters, 3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(inputs) for i in range(self.n_conv_layers-1): conv[i+1] =", "print(\"Predicting validation data...\") f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r') f_velocity = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity_validation.h5', 'r') im =", "Deconvolution2D(self.n_filters, 3, 3, output_shape=(self.batch_size,self.nx, self.ny,self.n_filters), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i]) x = merge([conv[self.n_conv_layers-i-2], x],", "logs={}): self.losses.append(logs) with open(\"{0}_loss.json\".format(self.root), 'w') as f: json.dump(self.losses, f) def finalize(self): pass class", "self.batch_size = 32 self.n_conv_layers = 10 self.stride = 1 self.skip_frequency = 2 def", "in range(3): minv = np.min([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]]) maxv = np.max([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]]) res = ax[ind,0].imshow(im[index[ind],:,:,0]) pl.colorbar(res, ax=ax[ind,0])", "3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(inputs) for i in 
range(self.n_conv_layers-1): conv[i+1] = Convolution2D(self.n_filters, 3,", "return (cumsum[N:] - cumsum[:-N]) / N class LossHistory(Callback): def __init__(self, root, losses): self.root", "validation data...\") f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r') f_velocity = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity_validation.h5', 'r') im = f_images.get(\"intensity\")[:]", "'viga'): os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" from keras.layers import Input, Dense, Convolution2D, Flatten, merge, MaxPooling2D,", "UpSampling2D, Cropping2D, Deconvolution2D, Activation from keras.callbacks import ModelCheckpoint, Callback from keras.models import Model,", "self.model.load_weights(\"{0}_weights.hdf5\".format(self.root)) def validation_generator(self): f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r') images = f_images.get(\"intensity\") while 1: for", "h5py import platform import os import json import sys import argparse import scipy.ndimage", "import argparse import scipy.ndimage as nd import pickle import scipy.io as io from", "Activation from keras.callbacks import ModelCheckpoint, Callback from keras.models import Model, model_from_json from keras.utils.visualize_util", "self.nx, self.ny,self.n_filters), subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[-1]) for i in range(self.n_conv_layers-1): if (i % self.skip_frequency", "figsize=(18,10)) for ind in range(3): minv = np.min([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]]) maxv = np.max([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]]) res =", "f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r') f_velocity = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity_validation.h5', 'r') im = f_images.get(\"intensity\")[:] v =", "!= 'viga'): os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" from keras.layers import Input, Dense, Convolution2D, Flatten, merge,", "input_validation = images[i:i+self.batch_size,:,:,:].astype('float32') yield input_validation f_images.close() def predict_validation(self): print(\"Predicting validation data...\") f_images =", "ax[1,0].set_title('Time 2') ax[2,0].set_title('Time 3') pl.tight_layout() pl.show() pl.savefig(\"{0}_prediction_tau_{1}.png\".format(self.root, label[loop])) stop() if (__name__ == '__main__'):", "\"tensorflow\" if (platform.node() != 'viga'): os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" from keras.layers import Input, Dense,", "running_mean(x, N): cumsum = np.cumsum(np.insert(x, 0, 0)) return (cumsum[N:] - cumsum[:-N]) / N", "ax[ind,4].imshow(out[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,4]) ax[ind,1].set_title(r'vx ($\\tau$={0})'.format(label[loop])) ax[ind,2].set_title(r'vy ($\\tau$={0})'.format(label[loop])) ax[ind,3].set_title(r'vx(CNN) ($\\tau$={0})'.format(label[loop])) ax[ind,4].set_title(r'vy(CNN) ($\\tau$={0})'.format(label[loop]))", "f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r') images = f_images.get(\"intensity\") while 1: for i in range(1):", "border_mode='same', init='he_normal')(deconv[i]) final = Deconvolution2D(6, 1, 1, output_shape=(self.batch_size,self.nx, self.ny, 6), activation='relu', subsample=(self.stride,self.stride), 
border_mode='same',", "= Model(input=inputs, output=final) self.model.load_weights(\"{0}_weights.hdf5\".format(self.root)) def validation_generator(self): f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r') images = f_images.get(\"intensity\")", "= im.astype('float32') out = self.model.predict_generator(self.validation_generator(), 32) pl.close('all') minv = 0.0 maxv = 1.0", "= root self.nx = 50 self.ny = 50 self.n_times = 2 self.n_filters =", "ax[ind,1].imshow(v[index[ind],:,:,2*loop], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,1]) res = ax[ind,2].imshow(v[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,2]) res", "== 0): x = Deconvolution2D(self.n_filters, 3, 3, output_shape=(self.batch_size,self.nx, self.ny,self.n_filters), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i])", "self.n_times)) conv[0] = Convolution2D(self.n_filters, 3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(inputs) for i in", "import ModelCheckpoint, Callback from keras.models import Model, model_from_json from keras.utils.visualize_util import plot as", "ax[ind,1].set_title(r'vx ($\\tau$={0})'.format(label[loop])) ax[ind,2].set_title(r'vy ($\\tau$={0})'.format(label[loop])) ax[ind,3].set_title(r'vx(CNN) ($\\tau$={0})'.format(label[loop])) ax[ind,4].set_title(r'vy(CNN) ($\\tau$={0})'.format(label[loop])) ax[0,0].set_title('Time 1') ax[1,0].set_title('Time 2') ax[2,0].set_title('Time", "as stop if (platform.node() == 'viga'): os.environ[\"THEANO_FLAGS\"] = \"mode=FAST_RUN,device=cpu,floatX=float32\" os.environ[\"KERAS_BACKEND\"] = \"tensorflow\" if", "output=final) self.model.load_weights(\"{0}_weights.hdf5\".format(self.root)) def validation_generator(self): f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r') images = f_images.get(\"intensity\") while 1:", "vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,2]) res = ax[ind,3].imshow(out[index[ind],:,:,2*loop], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,3]) res =", "% 32) self.model = model_from_json(json_string) self.model.load_weights(\"{0}_weights.hdf5\".format(self.root)) def defineNetwork(self): conv = [None] * self.n_conv_layers", "pl.show() pl.savefig(\"{0}_prediction_tau_{1}.png\".format(self.root, label[loop])) stop() if (__name__ == '__main__'): out = trainDNNFull('cnns/test') out.defineNetwork() out.predict_validation()", "i in range(self.n_conv_layers-1): conv[i+1] = Convolution2D(self.n_filters, 3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[i]) deconv[0]", "res = ax[ind,0].imshow(im[index[ind],:,:,0]) pl.colorbar(res, ax=ax[ind,0]) res = ax[ind,1].imshow(v[index[ind],:,:,2*loop], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,1]) res", "def predict_validation(self): print(\"Predicting validation data...\") f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r') f_velocity = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity_validation.h5', 'r')", "if (platform.node() == 'viga'): os.environ[\"THEANO_FLAGS\"] = \"mode=FAST_RUN,device=cpu,floatX=float32\" os.environ[\"KERAS_BACKEND\"] = \"tensorflow\" if (platform.node() !=", "[%d' % 32) self.model = model_from_json(json_string) 
self.model.load_weights(\"{0}_weights.hdf5\".format(self.root)) def defineNetwork(self): conv = [None] *", "vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,1]) res = ax[ind,2].imshow(v[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,2]) res =", "= \"mode=FAST_RUN,device=cpu,floatX=float32\" os.environ[\"KERAS_BACKEND\"] = \"tensorflow\" if (platform.node() != 'viga'): os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" from", "activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i]) final = Deconvolution2D(6, 1, 1, output_shape=(self.batch_size,self.nx, self.ny, 6), activation='relu',", "print(\"Reading previous network...\") f = open('{0}_model.json'.format(self.root), 'r') json_string = f.read() f.close() json_string =", "while 1: for i in range(1): input_validation = images[i:i+self.batch_size,:,:,:].astype('float32') yield input_validation f_images.close() def", "pl.subplots(nrows=3, ncols=5, figsize=(18,10)) for ind in range(3): minv = np.min([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]]) maxv = np.max([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]])", "def __init__(self, root, losses): self.root = root self.losses = losses def on_epoch_end(self, batch,", "= h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity_validation.h5', 'r') im = f_images.get(\"intensity\")[:] v = f_velocity.get(\"velocity\")[:] im = im.astype('float32') out", "loop in range(3): f, ax = pl.subplots(nrows=3, ncols=5, figsize=(18,10)) for ind in range(3):", "= 32 self.n_conv_layers = 10 self.stride = 1 self.skip_frequency = 2 def readNetwork(self):", "Deconvolution2D(self.n_filters, 3, 3, activation='relu', output_shape=(self.batch_size, self.nx, self.ny,self.n_filters), subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[-1]) for i in", "f_images.close() def predict_validation(self): print(\"Predicting validation data...\") f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r') f_velocity = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity_validation.h5',", "range(self.n_conv_layers-1): if (i % self.skip_frequency == 0): x = Deconvolution2D(self.n_filters, 3, 3, output_shape=(self.batch_size,self.nx,", "= losses def on_epoch_end(self, batch, logs={}): self.losses.append(logs) with open(\"{0}_loss.json\".format(self.root), 'w') as f: json.dump(self.losses,", "from keras.callbacks import ModelCheckpoint, Callback from keras.models import Model, model_from_json from keras.utils.visualize_util import", "3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(inputs) for i in range(self.n_conv_layers-1): conv[i+1] = Convolution2D(self.n_filters,", "f.close() json_string = json_string.replace('\"output_shape\": [null', '\"output_shape\": [%d' % 32) self.model = model_from_json(json_string) self.model.load_weights(\"{0}_weights.hdf5\".format(self.root))", "0)) return (cumsum[N:] - cumsum[:-N]) / N class LossHistory(Callback): def __init__(self, root, losses):", "from keras.layers import Input, Dense, Convolution2D, Flatten, merge, MaxPooling2D, UpSampling2D, Cropping2D, Deconvolution2D, Activation", "self.root = root self.losses = losses def on_epoch_end(self, batch, logs={}): self.losses.append(logs) with open(\"{0}_loss.json\".format(self.root),", "res = ax[ind,3].imshow(out[index[ind],:,:,2*loop], vmin=minv, vmax=maxv) 
pl.colorbar(res, ax=ax[ind,3]) res = ax[ind,4].imshow(out[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv) pl.colorbar(res,", "= 0.0 maxv = 1.0 np.random.seed(123) index = np.random.permutation(30) label = [1, 0.1,", "3, output_shape=(self.batch_size,self.nx, self.ny,self.n_filters), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i]) x = merge([conv[self.n_conv_layers-i-2], x], mode='sum') deconv[i+1]", "import json import sys import argparse import scipy.ndimage as nd import pickle import", "'r') json_string = f.read() f.close() json_string = json_string.replace('\"output_shape\": [null', '\"output_shape\": [%d' % 32)", "= f_images.get(\"intensity\") while 1: for i in range(1): input_validation = images[i:i+self.batch_size,:,:,:].astype('float32') yield input_validation", "MaxPooling2D, UpSampling2D, Cropping2D, Deconvolution2D, Activation from keras.callbacks import ModelCheckpoint, Callback from keras.models import", "Callback from keras.models import Model, model_from_json from keras.utils.visualize_util import plot as kerasPlot import", "[None] * self.n_conv_layers deconv = [None] * self.n_conv_layers inputs = Input(shape=(self.nx, self.ny, self.n_times))", "ax=ax[ind,4]) ax[ind,1].set_title(r'vx ($\\tau$={0})'.format(label[loop])) ax[ind,2].set_title(r'vy ($\\tau$={0})'.format(label[loop])) ax[ind,3].set_title(r'vx(CNN) ($\\tau$={0})'.format(label[loop])) ax[ind,4].set_title(r'vy(CNN) ($\\tau$={0})'.format(label[loop])) ax[0,0].set_title('Time 1') ax[1,0].set_title('Time 2')", "from keras.models import Model, model_from_json from keras.utils.visualize_util import plot as kerasPlot import keras.optimizers", "losses): self.root = root self.losses = losses def on_epoch_end(self, batch, logs={}): self.losses.append(logs) with", "Input, Dense, Convolution2D, Flatten, merge, MaxPooling2D, UpSampling2D, Cropping2D, Deconvolution2D, Activation from keras.callbacks import", "json.dump(self.losses, f) def finalize(self): pass class trainDNNFull(object): def __init__(self, root): self.root = root", "pl.colorbar(res, ax=ax[ind,1]) res = ax[ind,2].imshow(v[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,2]) res = ax[ind,3].imshow(out[index[ind],:,:,2*loop], vmin=minv,", "2') ax[2,0].set_title('Time 3') pl.tight_layout() pl.show() pl.savefig(\"{0}_prediction_tau_{1}.png\".format(self.root, label[loop])) stop() if (__name__ == '__main__'): out", "self.model.predict_generator(self.validation_generator(), 32) pl.close('all') minv = 0.0 maxv = 1.0 np.random.seed(123) index = np.random.permutation(30)", "= Input(shape=(self.nx, self.ny, self.n_times)) conv[0] = Convolution2D(self.n_filters, 3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(inputs)", "def finalize(self): pass class trainDNNFull(object): def __init__(self, root): self.root = root self.nx =", "64 self.batch_size = 32 self.n_conv_layers = 10 self.stride = 1 self.skip_frequency = 2", "= [1, 0.1, 0.01] for loop in range(3): f, ax = pl.subplots(nrows=3, ncols=5,", "minv = np.min([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]]) maxv = np.max([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]]) res = ax[ind,0].imshow(im[index[ind],:,:,0]) pl.colorbar(res, ax=ax[ind,0]) res =", "validation_generator(self): f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r') images = f_images.get(\"intensity\") while 1: for i in", 
"output_shape=(self.batch_size,self.nx, self.ny,self.n_filters), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i]) x = merge([conv[self.n_conv_layers-i-2], x], mode='sum') deconv[i+1] =", "import set_trace as stop if (platform.node() == 'viga'): os.environ[\"THEANO_FLAGS\"] = \"mode=FAST_RUN,device=cpu,floatX=float32\" os.environ[\"KERAS_BACKEND\"] =", "self.stride = 1 self.skip_frequency = 2 def readNetwork(self): print(\"Reading previous network...\") f =", "= pl.subplots(nrows=3, ncols=5, figsize=(18,10)) for ind in range(3): minv = np.min([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]]) maxv =", "'r') im = f_images.get(\"intensity\")[:] v = f_velocity.get(\"velocity\")[:] im = im.astype('float32') out = self.model.predict_generator(self.validation_generator(),", "as kerasPlot import keras.optimizers from keras.utils import np_utils def running_mean(x, N): cumsum =", "ax[ind,3].imshow(out[index[ind],:,:,2*loop], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,3]) res = ax[ind,4].imshow(out[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv) pl.colorbar(res, ax=ax[ind,4]) ax[ind,1].set_title(r'vx", "pl.colorbar(res, ax=ax[ind,4]) ax[ind,1].set_title(r'vx ($\\tau$={0})'.format(label[loop])) ax[ind,2].set_title(r'vy ($\\tau$={0})'.format(label[loop])) ax[ind,3].set_title(r'vx(CNN) ($\\tau$={0})'.format(label[loop])) ax[ind,4].set_title(r'vy(CNN) ($\\tau$={0})'.format(label[loop])) ax[0,0].set_title('Time 1') ax[1,0].set_title('Time", "= 2 def readNetwork(self): print(\"Reading previous network...\") f = open('{0}_model.json'.format(self.root), 'r') json_string =", "== 'viga'): os.environ[\"THEANO_FLAGS\"] = \"mode=FAST_RUN,device=cpu,floatX=float32\" os.environ[\"KERAS_BACKEND\"] = \"tensorflow\" if (platform.node() != 'viga'): os.environ[\"CUDA_VISIBLE_DEVICES\"]", "images = f_images.get(\"intensity\") while 1: for i in range(1): input_validation = images[i:i+self.batch_size,:,:,:].astype('float32') yield", "Deconvolution2D(self.n_filters, 3, 3, output_shape=(self.batch_size,self.nx, self.ny,self.n_filters), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i]) final = Deconvolution2D(6, 1,", "0.1, 0.01] for loop in range(3): f, ax = pl.subplots(nrows=3, ncols=5, figsize=(18,10)) for", "np.random.seed(123) index = np.random.permutation(30) label = [1, 0.1, 0.01] for loop in range(3):", "i in range(1): input_validation = images[i:i+self.batch_size,:,:,:].astype('float32') yield input_validation f_images.close() def predict_validation(self): print(\"Predicting validation", "range(1): input_validation = images[i:i+self.batch_size,:,:,:].astype('float32') yield input_validation f_images.close() def predict_validation(self): print(\"Predicting validation data...\") f_images", "'r') images = f_images.get(\"intensity\") while 1: for i in range(1): input_validation = images[i:i+self.batch_size,:,:,:].astype('float32')", "f_velocity.get(\"velocity\")[:] im = im.astype('float32') out = self.model.predict_generator(self.validation_generator(), 32) pl.close('all') minv = 0.0 maxv", "deconv[0] = Deconvolution2D(self.n_filters, 3, 3, activation='relu', output_shape=(self.batch_size, self.nx, self.ny,self.n_filters), subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[-1]) for", "($\\tau$={0})'.format(label[loop])) ax[ind,3].set_title(r'vx(CNN) ($\\tau$={0})'.format(label[loop])) ax[ind,4].set_title(r'vy(CNN) ($\\tau$={0})'.format(label[loop])) ax[0,0].set_title('Time 
1') ax[1,0].set_title('Time 2') ax[2,0].set_title('Time 3') pl.tight_layout() pl.show()", "= 50 self.n_times = 2 self.n_filters = 64 self.batch_size = 32 self.n_conv_layers =", "2 self.n_filters = 64 self.batch_size = 32 self.n_conv_layers = 10 self.stride = 1", "($\\tau$={0})'.format(label[loop])) ax[0,0].set_title('Time 1') ax[1,0].set_title('Time 2') ax[2,0].set_title('Time 3') pl.tight_layout() pl.show() pl.savefig(\"{0}_prediction_tau_{1}.png\".format(self.root, label[loop])) stop() if", "1.0 np.random.seed(123) index = np.random.permutation(30) label = [1, 0.1, 0.01] for loop in", "conv[i+1] = Convolution2D(self.n_filters, 3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[i]) deconv[0] = Deconvolution2D(self.n_filters, 3,", "numpy as np import matplotlib.pyplot as pl import h5py import platform import os", "= self.model.predict_generator(self.validation_generator(), 32) pl.close('all') minv = 0.0 maxv = 1.0 np.random.seed(123) index =", "* self.n_conv_layers inputs = Input(shape=(self.nx, self.ny, self.n_times)) conv[0] = Convolution2D(self.n_filters, 3, 3, activation='relu',", "'viga'): os.environ[\"THEANO_FLAGS\"] = \"mode=FAST_RUN,device=cpu,floatX=float32\" os.environ[\"KERAS_BACKEND\"] = \"tensorflow\" if (platform.node() != 'viga'): os.environ[\"CUDA_VISIBLE_DEVICES\"] =" ]
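The readNetwork method above rebuilds the model from its serialized JSON rather than from code: Keras 1 stores Deconvolution2D output shapes with a null batch dimension, and the string replace pins that dimension to the prediction batch size (32) before model_from_json parses it. A minimal, self-contained sketch of that substitution; the one-layer JSON string below is a made-up stand-in for a real serialized layer config:

# Sketch of the output_shape patch performed in readNetwork.
# The JSON string is a hypothetical stand-in for one serialized layer.
batch_size = 32
json_string = '{"output_shape": [null, 50, 50, 64]}'
patched = json_string.replace('"output_shape": [null', '"output_shape": [%d' % batch_size)
print(patched)  # {"output_shape": [32, 50, 50, 64]}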
# Copyright 2014-2018 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Overview
========

Scan payloads using Falcon Sandbox

"""

import requests
from time import sleep
from json import JSONDecodeError
from configparser import ConfigParser
from typing import Dict, Optional, Union, Tuple, List

from stoq import helpers
from stoq.plugins import WorkerPlugin
from stoq.exceptions import StoqPluginException
from stoq import Payload, RequestMeta, WorkerResponse


class FalconSandboxPlugin(WorkerPlugin):
    def __init__(self, config: ConfigParser, plugin_opts: Optional[Dict]) -> None:
        super().__init__(config, plugin_opts)

        self.sandbox_url = None
        self.apikey = None
        self.delay = 30
        self.max_attempts = 10
        self.useragent = 'Falcon Sandbox'
        # Available environment IDs:
        #   300: 'Linux (Ubuntu 16.04, 64 bit)'
        #   200: 'Android Static Analysis'
        #   160: 'Windows 10 64 bit'
        #   110: 'Windows 7 64 bit'
        #   100: 'Windows 7 32 bit'
        self.environment_id = 160
        self.wait_for_results = True

        if plugin_opts and 'sandbox_url' in plugin_opts:
            self.sandbox_url = plugin_opts['sandbox_url']
        elif config.has_option('options', 'sandbox_url'):
            self.sandbox_url = config.get('options', 'sandbox_url')

        if plugin_opts and 'apikey' in plugin_opts:
            self.apikey = plugin_opts['apikey']
        elif config.has_option('options', 'apikey'):
            self.apikey = config.get('options', 'apikey')

        if plugin_opts and 'delay' in plugin_opts:
            self.delay = int(plugin_opts['delay'])
        elif config.has_option('options', 'delay'):
            self.delay = int(config.get('options', 'delay'))

        if plugin_opts and 'max_attempts' in plugin_opts:
            self.max_attempts = int(plugin_opts['max_attempts'])
        elif config.has_option('options', 'max_attempts'):
            self.max_attempts = config.getint('options', 'max_attempts')

        if plugin_opts and 'useragent' in plugin_opts:
            self.useragent = plugin_opts['useragent']
        elif config.has_option('options', 'useragent'):
            self.useragent = config.get('options', 'useragent')

        if plugin_opts and 'environment_id' in plugin_opts:
            self.environment_id = int(plugin_opts['environment_id'])
        elif config.has_option('options', 'environment_id'):
            self.environment_id = config.getint('options', 'environment_id')

        if plugin_opts and 'wait_for_results' in plugin_opts:
            self.wait_for_results = plugin_opts['wait_for_results']
        elif config.has_option('options', 'wait_for_results'):
            self.wait_for_results = config.getboolean('options', 'wait_for_results')

        if not self.sandbox_url:
            raise StoqPluginException("Falcon Sandbox URL was not provided")

        if not self.apikey:
            raise StoqPluginException("Falcon Sandbox API Key was not provided")

    def scan(self, payload: Payload, request_meta: RequestMeta) -> WorkerResponse:
        """
        Scan payloads using Falcon Sandbox

        """
        errors = None
        url = f'{self.sandbox_url}/submit/file'
        headers = {'api-key': self.apikey, 'user-agent': self.useragent}
        filename = payload.payload_meta.extra_data.get(
            'filename', helpers.get_sha1(payload.content)
        )
        if isinstance(filename, bytes):
            filename = filename.decode()
        files = {'file': (filename, payload.content)}
        data = {'environment_id': self.environment_id}
        response = requests.post(url, data=data, files=files, headers=headers)
        response.raise_for_status()
        results = response.json()
        if self.wait_for_results:
            results, errors = self._parse_results(results['job_id'])
        return WorkerResponse(results, errors=errors)

    def _parse_results(
        self, job_id: str
    ) -> Tuple[Union[Dict, None], Union[List[str], None]]:
        """
        Wait for a scan to complete and then parse the results

        """
        count = 0
        err = None
        while count < self.max_attempts:
            sleep(self.delay)
            try:
                url = f'{self.sandbox_url}/report/{job_id}/summary'
                headers = {'api-key': self.apikey, 'user-agent': self.useragent}
                response = requests.get(url, headers=headers)
                response.raise_for_status()
                result = response.json()
                if result['state'] not in ('IN_QUEUE', 'IN_PROGRESS'):
                    return result, None
            except (JSONDecodeError, KeyError) as e:
                # Bind to 'e' rather than 'err': 'except ... as err' deletes
                # 'err' when the handler exits, losing the saved message.
                err = str(e)
            finally:
                count += 1
        if count >= self.max_attempts:
            err = f'Scan did not complete in time -- attempts: {count}'
        return None, [err]
\"\"\" Overview ======== Scan payloads using Falcon Sandbox \"\"\" import", "= True if plugin_opts and 'sandbox_url' in plugin_opts: self.sandbox_url = plugin_opts['sandbox_url'] elif config.has_option('options',", "self.sandbox_url = plugin_opts['sandbox_url'] elif config.has_option('options', 'sandbox_url'): self.sandbox_url = config.get('options', 'sandbox_url') if plugin_opts and", "parse the results \"\"\" count = 0 err = None while count <", "{'api-key': self.apikey, 'user-agent': self.useragent} response = requests.get(url, headers=headers) response.raise_for_status() result = response.json() if", "= plugin_opts['apikey'] elif config.has_option('options', 'apikey'): self.apikey = config.get('options', 'apikey') if plugin_opts and 'delay'", "self.sandbox_url: raise StoqPluginException(\"Falcon Sandbox URL was not provided\") if not self.apikey: raise StoqPluginException(\"Falcon", "\"\"\" Wait for a scan to complete and then parse the results \"\"\"", "helpers.get_sha1(payload.content) ) if isinstance(filename, bytes): filename = filename.decode() files = {'file': (filename, payload.content)}", "Sandbox API Key was not provided\") def scan(self, payload: Payload, request_meta: RequestMeta) ->", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "\"\"\" Overview ======== Scan payloads using Falcon Sandbox \"\"\" import requests from time", "'user-agent': self.useragent} response = requests.get(url, headers=headers) response.raise_for_status() result = response.json() if result['state'] not", "Analytics Group # # Licensed under the Apache License, Version 2.0 (the \"License\");", "self.max_attempts = config.getint('options', 'max_attempts') if plugin_opts and 'useragent' in plugin_opts: self.useragent = plugin_opts['useragent']", "url = f'{self.sandbox_url}/report/{job_id}/summary' headers = {'api-key': self.apikey, 'user-agent': self.useragent} response = requests.get(url, headers=headers)", "self, job_id: str ) -> Tuple[Union[Dict, None], Union[List[str], None]]: \"\"\" Wait for a", "'max_attempts'): self.max_attempts = config.getint('options', 'max_attempts') if plugin_opts and 'useragent' in plugin_opts: self.useragent =", "from time import sleep from json import JSONDecodeError from configparser import ConfigParser from", "self.useragent = config.get('options', 'useragent') if plugin_opts and 'environment_id' in plugin_opts: self.environment_id = int(plugin_opts['environment_id'])", "time import sleep from json import JSONDecodeError from configparser import ConfigParser from typing", "= int(plugin_opts['environment_id']) elif config.has_option('options', 'environment_id'): self.environment_id = config.getint('options', 'environment_id') if plugin_opts and 'wait_for_results'", "use this file except in compliance with the License. # You may obtain", "not self.apikey: raise StoqPluginException(\"Falcon Sandbox API Key was not provided\") def scan(self, payload:", "and 'delay' in plugin_opts: self.delay = int(plugin_opts['delay']) elif config.has_option('options', 'delay'): self.delay = int(config.get('options',", "for the specific language governing permissions and # limitations under the License. 
\"\"\"", "= requests.post(url, data=data, files=files, headers=headers) response.raise_for_status() results = response.json() if self.wait_for_results: results, errors", "plugin_opts['useragent'] elif config.has_option('options', 'useragent'): self.useragent = config.get('options', 'useragent') if plugin_opts and 'environment_id' in", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "int(config.get('options', 'delay')) if plugin_opts and 'max_attempts' in plugin_opts: self.max_attempts = int(plugin_opts['max_attempts']) elif config.has_option('options',", "None: super().__init__(config, plugin_opts) self.sandbox_url = None self.apikey = None self.delay = 30 self.max_attempts", "300: 'Linux (Ubuntu 16.04, 64 bit)', # 200: 'Android Static Analysis’, # 160:", "(filename, payload.content)} data = {'environment_id': self.environment_id} response = requests.post(url, data=data, files=files, headers=headers) response.raise_for_status()", "2.0 (the \"License\"); # you may not use this file except in compliance", "Optional, Union, Tuple, List from stoq import helpers from stoq.plugins import WorkerPlugin from", "{'environment_id': self.environment_id} response = requests.post(url, data=data, files=files, headers=headers) response.raise_for_status() results = response.json() if", "= int(plugin_opts['delay']) elif config.has_option('options', 'delay'): self.delay = int(config.get('options', 'delay')) if plugin_opts and 'max_attempts'", "plugin_opts: self.max_attempts = int(plugin_opts['max_attempts']) elif config.has_option('options', 'max_attempts'): self.max_attempts = config.getint('options', 'max_attempts') if plugin_opts", "Wait for a scan to complete and then parse the results \"\"\" count", "files=files, headers=headers) response.raise_for_status() results = response.json() if self.wait_for_results: results, errors = self._parse_results(results['job_id']) return", "None self.delay = 30 self.max_attempts = 10 self.useragent = 'Falcon Sandbox' # Available", "governing permissions and # limitations under the License. \"\"\" Overview ======== Scan payloads", "'environment_id' in plugin_opts: self.environment_id = int(plugin_opts['environment_id']) elif config.has_option('options', 'environment_id'): self.environment_id = config.getint('options', 'environment_id')", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "'Windows 10 64 bit’, # 110: 'Windows 7 64 bit’, # 100: ‘Windows", "Falcon Sandbox \"\"\" import requests from time import sleep from json import JSONDecodeError", "result['state'] not in ('IN_QUEUE', 'IN_PROGRESS'): return result, None except (JSONDecodeError, KeyError) as err:", "StoqPluginException(\"Falcon Sandbox API Key was not provided\") def scan(self, payload: Payload, request_meta: RequestMeta)", "count = 0 err = None while count < self.max_attempts: sleep(self.delay) try: url", "# # Unless required by applicable law or agreed to in writing, software", "if plugin_opts and 'useragent' in plugin_opts: self.useragent = plugin_opts['useragent'] elif config.has_option('options', 'useragent'): self.useragent", "headers = {'api-key': self.apikey, 'user-agent': self.useragent} response = requests.get(url, headers=headers) response.raise_for_status() result =", "express or implied. 
# See the License for the specific language governing permissions", "self.max_attempts = int(plugin_opts['max_attempts']) elif config.has_option('options', 'max_attempts'): self.max_attempts = config.getint('options', 'max_attempts') if plugin_opts and", "self.max_attempts = 10 self.useragent = 'Falcon Sandbox' # Available environments ID: # 300:", "StoqPluginException(\"Falcon Sandbox URL was not provided\") if not self.apikey: raise StoqPluginException(\"Falcon Sandbox API", "isinstance(filename, bytes): filename = filename.decode() files = {'file': (filename, payload.content)} data = {'environment_id':", "RequestMeta, WorkerResponse class FalconSandboxPlugin(WorkerPlugin): def __init__(self, config: ConfigParser, plugin_opts: Optional[Dict]) -> None: super().__init__(config,", "= requests.get(url, headers=headers) response.raise_for_status() result = response.json() if result['state'] not in ('IN_QUEUE', 'IN_PROGRESS'):", "headers = {'api-key': self.apikey, 'user-agent': self.useragent} filename = payload.payload_meta.extra_data.get( 'filename', helpers.get_sha1(payload.content) ) if", "either express or implied. # See the License for the specific language governing", "headers=headers) response.raise_for_status() result = response.json() if result['state'] not in ('IN_QUEUE', 'IN_PROGRESS'): return result,", "and 'environment_id' in plugin_opts: self.environment_id = int(plugin_opts['environment_id']) elif config.has_option('options', 'environment_id'): self.environment_id = config.getint('options',", "err = f'Scan did not complete in time -- attempts: {count}' return None,", "typing import Dict, Optional, Union, Tuple, List from stoq import helpers from stoq.plugins", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "complete and then parse the results \"\"\" count = 0 err = None", "result = response.json() if result['state'] not in ('IN_QUEUE', 'IN_PROGRESS'): return result, None except", "requests.get(url, headers=headers) response.raise_for_status() result = response.json() if result['state'] not in ('IN_QUEUE', 'IN_PROGRESS'): return", "= config.getint('options', 'max_attempts') if plugin_opts and 'useragent' in plugin_opts: self.useragent = plugin_opts['useragent'] elif", "= {'file': (filename, payload.content)} data = {'environment_id': self.environment_id} response = requests.post(url, data=data, files=files,", "(Ubuntu 16.04, 64 bit)', # 200: 'Android Static Analysis’, # 160: 'Windows 10", "'max_attempts') if plugin_opts and 'useragent' in plugin_opts: self.useragent = plugin_opts['useragent'] elif config.has_option('options', 'useragent'):", "Sandbox URL was not provided\") if not self.apikey: raise StoqPluginException(\"Falcon Sandbox API Key", "if plugin_opts and 'delay' in plugin_opts: self.delay = int(plugin_opts['delay']) elif config.has_option('options', 'delay'): self.delay", "plugin_opts and 'sandbox_url' in plugin_opts: self.sandbox_url = plugin_opts['sandbox_url'] elif config.has_option('options', 'sandbox_url'): self.sandbox_url =", "str(err) finally: count += 1 if count >= self.max_attempts: err = f'Scan did", "= plugin_opts['sandbox_url'] elif config.has_option('options', 'sandbox_url'): self.sandbox_url = config.get('options', 'sandbox_url') if plugin_opts and 'apikey'", "the License. 
# You may obtain a copy of the License at #", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "plugin_opts: self.environment_id = int(plugin_opts['environment_id']) elif config.has_option('options', 'environment_id'): self.environment_id = config.getint('options', 'environment_id') if plugin_opts", "-> Tuple[Union[Dict, None], Union[List[str], None]]: \"\"\" Wait for a scan to complete and", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", ") if isinstance(filename, bytes): filename = filename.decode() files = {'file': (filename, payload.content)} data", "ConfigParser, plugin_opts: Optional[Dict]) -> None: super().__init__(config, plugin_opts) self.sandbox_url = None self.apikey = None", "'apikey' in plugin_opts: self.apikey = plugin_opts['apikey'] elif config.has_option('options', 'apikey'): self.apikey = config.get('options', 'apikey')", "bytes): filename = filename.decode() files = {'file': (filename, payload.content)} data = {'environment_id': self.environment_id}", "self.sandbox_url = config.get('options', 'sandbox_url') if plugin_opts and 'apikey' in plugin_opts: self.apikey = plugin_opts['apikey']", "plugin_opts: self.wait_for_results = plugin_opts['wait_for_results'] elif config.has_option('options', 'wait_for_results'): self.wait_for_results = config.getboolean('options', 'wait_for_results') if not", "super().__init__(config, plugin_opts) self.sandbox_url = None self.apikey = None self.delay = 30 self.max_attempts =", "bit’ self.environment_id = 160 self.wait_for_results = True if plugin_opts and 'sandbox_url' in plugin_opts:", "stoq import Payload, RequestMeta, WorkerResponse class FalconSandboxPlugin(WorkerPlugin): def __init__(self, config: ConfigParser, plugin_opts: Optional[Dict])", "config.has_option('options', 'max_attempts'): self.max_attempts = config.getint('options', 'max_attempts') if plugin_opts and 'useragent' in plugin_opts: self.useragent", "# limitations under the License. \"\"\" Overview ======== Scan payloads using Falcon Sandbox", "< self.max_attempts: sleep(self.delay) try: url = f'{self.sandbox_url}/report/{job_id}/summary' headers = {'api-key': self.apikey, 'user-agent': self.useragent}", "bit’, # 110: 'Windows 7 64 bit’, # 100: ‘Windows 7 32 bit’", "= int(config.get('options', 'delay')) if plugin_opts and 'max_attempts' in plugin_opts: self.max_attempts = int(plugin_opts['max_attempts']) elif", "class FalconSandboxPlugin(WorkerPlugin): def __init__(self, config: ConfigParser, plugin_opts: Optional[Dict]) -> None: super().__init__(config, plugin_opts) self.sandbox_url", "= f'Scan did not complete in time -- attempts: {count}' return None, [err]", "StoqPluginException from stoq import Payload, RequestMeta, WorkerResponse class FalconSandboxPlugin(WorkerPlugin): def __init__(self, config: ConfigParser,", "self.sandbox_url = None self.apikey = None self.delay = 30 self.max_attempts = 10 self.useragent", "sleep(self.delay) try: url = f'{self.sandbox_url}/report/{job_id}/summary' headers = {'api-key': self.apikey, 'user-agent': self.useragent} response =", "with the License. 
# You may obtain a copy of the License at", "def _parse_results( self, job_id: str ) -> Tuple[Union[Dict, None], Union[List[str], None]]: \"\"\" Wait", "from stoq import Payload, RequestMeta, WorkerResponse class FalconSandboxPlugin(WorkerPlugin): def __init__(self, config: ConfigParser, plugin_opts:", "RequestMeta) -> WorkerResponse: \"\"\" Scan payloads using Falcon Sandbox \"\"\" errors = None", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "config.getint('options', 'max_attempts') if plugin_opts and 'useragent' in plugin_opts: self.useragent = plugin_opts['useragent'] elif config.has_option('options',", "headers=headers) response.raise_for_status() results = response.json() if self.wait_for_results: results, errors = self._parse_results(results['job_id']) return WorkerResponse(results,", "as err: err = str(err) finally: count += 1 if count >= self.max_attempts:", "= {'environment_id': self.environment_id} response = requests.post(url, data=data, files=files, headers=headers) response.raise_for_status() results = response.json()", "# 160: 'Windows 10 64 bit’, # 110: 'Windows 7 64 bit’, #", "response.raise_for_status() results = response.json() if self.wait_for_results: results, errors = self._parse_results(results['job_id']) return WorkerResponse(results, errors=errors)", "config.has_option('options', 'sandbox_url'): self.sandbox_url = config.get('options', 'sandbox_url') if plugin_opts and 'apikey' in plugin_opts: self.apikey", "'Falcon Sandbox' # Available environments ID: # 300: 'Linux (Ubuntu 16.04, 64 bit)',", "= f'{self.sandbox_url}/report/{job_id}/summary' headers = {'api-key': self.apikey, 'user-agent': self.useragent} response = requests.get(url, headers=headers) response.raise_for_status()", "'delay' in plugin_opts: self.delay = int(plugin_opts['delay']) elif config.has_option('options', 'delay'): self.delay = int(config.get('options', 'delay'))", "law or agreed to in writing, software # distributed under the License is", "# Available environments ID: # 300: 'Linux (Ubuntu 16.04, 64 bit)', # 200:", "the License for the specific language governing permissions and # limitations under the", "configparser import ConfigParser from typing import Dict, Optional, Union, Tuple, List from stoq", "provided\") def scan(self, payload: Payload, request_meta: RequestMeta) -> WorkerResponse: \"\"\" Scan payloads using", "permissions and # limitations under the License. 
\"\"\" Overview ======== Scan payloads using", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Union[List[str], None]]: \"\"\" Wait for a scan to complete and then parse the", "stoq import helpers from stoq.plugins import WorkerPlugin from stoq.exceptions import StoqPluginException from stoq", "from configparser import ConfigParser from typing import Dict, Optional, Union, Tuple, List from", "errors = None url = f'{self.sandbox_url}/submit/file' headers = {'api-key': self.apikey, 'user-agent': self.useragent} filename", "if isinstance(filename, bytes): filename = filename.decode() files = {'file': (filename, payload.content)} data =", "files = {'file': (filename, payload.content)} data = {'environment_id': self.environment_id} response = requests.post(url, data=data,", "if plugin_opts and 'apikey' in plugin_opts: self.apikey = plugin_opts['apikey'] elif config.has_option('options', 'apikey'): self.apikey", "in plugin_opts: self.apikey = plugin_opts['apikey'] elif config.has_option('options', 'apikey'): self.apikey = config.get('options', 'apikey') if", "WorkerResponse(results, errors=errors) def _parse_results( self, job_id: str ) -> Tuple[Union[Dict, None], Union[List[str], None]]:", "'delay')) if plugin_opts and 'max_attempts' in plugin_opts: self.max_attempts = int(plugin_opts['max_attempts']) elif config.has_option('options', 'max_attempts'):", "python3 # Copyright 2014-2018 PUNCH Cyber Analytics Group # # Licensed under the", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "#!/usr/bin/env python3 # Copyright 2014-2018 PUNCH Cyber Analytics Group # # Licensed under", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "'useragent' in plugin_opts: self.useragent = plugin_opts['useragent'] elif config.has_option('options', 'useragent'): self.useragent = config.get('options', 'useragent')", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "= None url = f'{self.sandbox_url}/submit/file' headers = {'api-key': self.apikey, 'user-agent': self.useragent} filename =", "Static Analysis’, # 160: 'Windows 10 64 bit’, # 110: 'Windows 7 64", "plugin_opts: self.delay = int(plugin_opts['delay']) elif config.has_option('options', 'delay'): self.delay = int(config.get('options', 'delay')) if plugin_opts", "self.apikey: raise StoqPluginException(\"Falcon Sandbox API Key was not provided\") def scan(self, payload: Payload,", "specific language governing permissions and # limitations under the License. 
\"\"\" Overview ========", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "plugin_opts and 'delay' in plugin_opts: self.delay = int(plugin_opts['delay']) elif config.has_option('options', 'delay'): self.delay =", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "ID: # 300: 'Linux (Ubuntu 16.04, 64 bit)', # 200: 'Android Static Analysis’,", "was not provided\") if not self.apikey: raise StoqPluginException(\"Falcon Sandbox API Key was not", "PUNCH Cyber Analytics Group # # Licensed under the Apache License, Version 2.0", "'sandbox_url') if plugin_opts and 'apikey' in plugin_opts: self.apikey = plugin_opts['apikey'] elif config.has_option('options', 'apikey'):", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "return result, None except (JSONDecodeError, KeyError) as err: err = str(err) finally: count", "in ('IN_QUEUE', 'IN_PROGRESS'): return result, None except (JSONDecodeError, KeyError) as err: err =", "filename = payload.payload_meta.extra_data.get( 'filename', helpers.get_sha1(payload.content) ) if isinstance(filename, bytes): filename = filename.decode() files", "plugin_opts['sandbox_url'] elif config.has_option('options', 'sandbox_url'): self.sandbox_url = config.get('options', 'sandbox_url') if plugin_opts and 'apikey' in", "for a scan to complete and then parse the results \"\"\" count =", "= plugin_opts['wait_for_results'] elif config.has_option('options', 'wait_for_results'): self.wait_for_results = config.getboolean('options', 'wait_for_results') if not self.sandbox_url: raise", "config.getboolean('options', 'wait_for_results') if not self.sandbox_url: raise StoqPluginException(\"Falcon Sandbox URL was not provided\") if", "payload.content)} data = {'environment_id': self.environment_id} response = requests.post(url, data=data, files=files, headers=headers) response.raise_for_status() results", "= config.getint('options', 'environment_id') if plugin_opts and 'wait_for_results' in plugin_opts: self.wait_for_results = plugin_opts['wait_for_results'] elif", "try: url = f'{self.sandbox_url}/report/{job_id}/summary' headers = {'api-key': self.apikey, 'user-agent': self.useragent} response = requests.get(url,", "'filename', helpers.get_sha1(payload.content) ) if isinstance(filename, bytes): filename = filename.decode() files = {'file': (filename,", "None except (JSONDecodeError, KeyError) as err: err = str(err) finally: count += 1", "if plugin_opts and 'max_attempts' in plugin_opts: self.max_attempts = int(plugin_opts['max_attempts']) elif config.has_option('options', 'max_attempts'): self.max_attempts", "Version 2.0 (the \"License\"); # you may not use this file except in", "and # limitations under the License. \"\"\" Overview ======== Scan payloads using Falcon", "Union, Tuple, List from stoq import helpers from stoq.plugins import WorkerPlugin from stoq.exceptions", "except in compliance with the License. 
# You may obtain a copy of", "self.apikey = config.get('options', 'apikey') if plugin_opts and 'delay' in plugin_opts: self.delay = int(plugin_opts['delay'])", "True if plugin_opts and 'sandbox_url' in plugin_opts: self.sandbox_url = plugin_opts['sandbox_url'] elif config.has_option('options', 'sandbox_url'):", "= {'api-key': self.apikey, 'user-agent': self.useragent} response = requests.get(url, headers=headers) response.raise_for_status() result = response.json()", "plugin_opts) self.sandbox_url = None self.apikey = None self.delay = 30 self.max_attempts = 10", "1 if count >= self.max_attempts: err = f'Scan did not complete in time", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "self.useragent = plugin_opts['useragent'] elif config.has_option('options', 'useragent'): self.useragent = config.get('options', 'useragent') if plugin_opts and", "plugin_opts: self.sandbox_url = plugin_opts['sandbox_url'] elif config.has_option('options', 'sandbox_url'): self.sandbox_url = config.get('options', 'sandbox_url') if plugin_opts", "import WorkerPlugin from stoq.exceptions import StoqPluginException from stoq import Payload, RequestMeta, WorkerResponse class", ">= self.max_attempts: err = f'Scan did not complete in time -- attempts: {count}'", "requests.post(url, data=data, files=files, headers=headers) response.raise_for_status() results = response.json() if self.wait_for_results: results, errors =", "100: ‘Windows 7 32 bit’ self.environment_id = 160 self.wait_for_results = True if plugin_opts", "KeyError) as err: err = str(err) finally: count += 1 if count >=", "str ) -> Tuple[Union[Dict, None], Union[List[str], None]]: \"\"\" Wait for a scan to", "'Windows 7 64 bit’, # 100: ‘Windows 7 32 bit’ self.environment_id = 160", "None self.apikey = None self.delay = 30 self.max_attempts = 10 self.useragent = 'Falcon", "provided\") if not self.apikey: raise StoqPluginException(\"Falcon Sandbox API Key was not provided\") def", "if not self.sandbox_url: raise StoqPluginException(\"Falcon Sandbox URL was not provided\") if not self.apikey:", "None url = f'{self.sandbox_url}/submit/file' headers = {'api-key': self.apikey, 'user-agent': self.useragent} filename = payload.payload_meta.extra_data.get(", ") -> Tuple[Union[Dict, None], Union[List[str], None]]: \"\"\" Wait for a scan to complete", "while count < self.max_attempts: sleep(self.delay) try: url = f'{self.sandbox_url}/report/{job_id}/summary' headers = {'api-key': self.apikey,", "self.delay = int(plugin_opts['delay']) elif config.has_option('options', 'delay'): self.delay = int(config.get('options', 'delay')) if plugin_opts and", "{'file': (filename, payload.content)} data = {'environment_id': self.environment_id} response = requests.post(url, data=data, files=files, headers=headers)", "config.has_option('options', 'delay'): self.delay = int(config.get('options', 'delay')) if plugin_opts and 'max_attempts' in plugin_opts: self.max_attempts", "elif config.has_option('options', 'delay'): self.delay = int(config.get('options', 'delay')) if plugin_opts and 'max_attempts' in plugin_opts:", "2014-2018 PUNCH Cyber Analytics Group # # Licensed under the Apache License, Version", "elif config.has_option('options', 'useragent'): self.useragent = config.get('options', 'useragent') if plugin_opts and 'environment_id' in plugin_opts:", "self.apikey = 
plugin_opts['apikey'] elif config.has_option('options', 'apikey'): self.apikey = config.get('options', 'apikey') if plugin_opts and", "'apikey') if plugin_opts and 'delay' in plugin_opts: self.delay = int(plugin_opts['delay']) elif config.has_option('options', 'delay'):", "FalconSandboxPlugin(WorkerPlugin): def __init__(self, config: ConfigParser, plugin_opts: Optional[Dict]) -> None: super().__init__(config, plugin_opts) self.sandbox_url =", "= 'Falcon Sandbox' # Available environments ID: # 300: 'Linux (Ubuntu 16.04, 64", "= str(err) finally: count += 1 if count >= self.max_attempts: err = f'Scan", "plugin_opts and 'wait_for_results' in plugin_opts: self.wait_for_results = plugin_opts['wait_for_results'] elif config.has_option('options', 'wait_for_results'): self.wait_for_results =", "in plugin_opts: self.wait_for_results = plugin_opts['wait_for_results'] elif config.has_option('options', 'wait_for_results'): self.wait_for_results = config.getboolean('options', 'wait_for_results') if", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "30 self.max_attempts = 10 self.useragent = 'Falcon Sandbox' # Available environments ID: #", "= 10 self.useragent = 'Falcon Sandbox' # Available environments ID: # 300: 'Linux", "API Key was not provided\") def scan(self, payload: Payload, request_meta: RequestMeta) -> WorkerResponse:", "err = None while count < self.max_attempts: sleep(self.delay) try: url = f'{self.sandbox_url}/report/{job_id}/summary' headers", "payloads using Falcon Sandbox \"\"\" errors = None url = f'{self.sandbox_url}/submit/file' headers =", "finally: count += 1 if count >= self.max_attempts: err = f'Scan did not", "bit)', # 200: 'Android Static Analysis’, # 160: 'Windows 10 64 bit’, #", "'delay'): self.delay = int(config.get('options', 'delay')) if plugin_opts and 'max_attempts' in plugin_opts: self.max_attempts =", "Cyber Analytics Group # # Licensed under the Apache License, Version 2.0 (the", "def scan(self, payload: Payload, request_meta: RequestMeta) -> WorkerResponse: \"\"\" Scan payloads using Falcon", "helpers from stoq.plugins import WorkerPlugin from stoq.exceptions import StoqPluginException from stoq import Payload,", "WorkerPlugin from stoq.exceptions import StoqPluginException from stoq import Payload, RequestMeta, WorkerResponse class FalconSandboxPlugin(WorkerPlugin):", "from json import JSONDecodeError from configparser import ConfigParser from typing import Dict, Optional," ]
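# The submit-then-poll flow in scan()/_parse_results() above generalizes to
# any asynchronous job API. A minimal, self-contained sketch of the pattern
# follows; the names here (poll_until_done, fetch_state) are illustrative and
# not part of the plugin. Total wall-clock wait is bounded by
# delay * max_attempts, which is the design choice the plugin makes too.
from time import sleep
from typing import Callable, Dict, Optional, Tuple


def poll_until_done(
    fetch_state: Callable[[], Dict],
    delay: float = 1.0,
    max_attempts: int = 5,
    pending: Tuple[str, ...] = ('IN_QUEUE', 'IN_PROGRESS'),
) -> Tuple[Optional[Dict], Optional[str]]:
    """Poll fetch_state() until the job leaves a pending state."""
    for attempt in range(1, max_attempts + 1):
        sleep(delay)
        result = fetch_state()
        if result.get('state') not in pending:
            return result, None
    return None, f'Scan did not complete in time -- attempts: {max_attempts}'


# Demo against a fake backend that finishes on the third poll:
_states = iter(['IN_QUEUE', 'IN_PROGRESS', 'SUCCESS'])
print(poll_until_done(lambda: {'state': next(_states)}, delay=0.01))
# -> ({'state': 'SUCCESS'}, None)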
[ "results2[i] = 1 else: results2[i] = int(results2[i])+1 programFun2(arry_list) # 写在白纸上,完成之后拍照 # 2题 #", "str == ele: nums+=1 results[str] = nums def programFun2(arry_list): results2 = {} count", "{} count = 1 for i in arry_list: if i not in results2.keys:", "[a,a,c,b,d,c,c,c,d,d] # {c:4,d:3,a:2,b:1} # 2. 常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; # 1题 arry_list = [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def programFun(arry_list):", "grep # sed -i \"1,2p\" nohup.log # grep # tail -f # netstat", "写在白纸上,完成之后拍照 # 2题 # ps -ef | grep # sed -i \"1,2p\" nohup.log", "# grep # tail -f # netstat # top # mv # cp", "for str in set_result: #strs = [] nums = 0 for ele in", "def programFun(arry_list): set_result = set(arry_list) results = {} for str in set_result: #strs", "in set_result: #strs = [] nums = 0 for ele in arry_list: if", "int(results2[i])+1 programFun2(arry_list) # 写在白纸上,完成之后拍照 # 2题 # ps -ef | grep # sed", "results2[i] = int(results2[i])+1 programFun2(arry_list) # 写在白纸上,完成之后拍照 # 2题 # ps -ef | grep", "\"1,2p\" nohup.log # grep # tail -f # netstat # top # mv", "# mv # cp # ssh # scp # 查看日志 # tail -f", "2题 # ps -ef | grep # sed -i \"1,2p\" nohup.log # grep", "-f # netstat # top # mv # cp # ssh # scp", "# {c:4,d:3,a:2,b:1} # 2. 常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; # 1题 arry_list = [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def programFun(arry_list): set_result", "programFun2(arry_list): results2 = {} count = 1 for i in arry_list: if i", "programFun(arry_list): set_result = set(arry_list) results = {} for str in set_result: #strs =", "2. 常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; # 1题 arry_list = [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def programFun(arry_list): set_result = set(arry_list) results", "nums def programFun2(arry_list): results2 = {} count = 1 for i in arry_list:", "results = {} for str in set_result: #strs = [] nums = 0", "# top # mv # cp # ssh # scp # 查看日志 #", "arry_list: if i not in results2.keys: results2[i] = 1 else: results2[i] = int(results2[i])+1", "[] nums = 0 for ele in arry_list: if str == ele: nums+=1", "sed -i \"1,2p\" nohup.log # grep # tail -f # netstat # top", "# tail -f # netstat # top # mv # cp # ssh", "for ele in arry_list: if str == ele: nums+=1 results[str] = nums def", "# EG: # [a,a,c,b,d,c,c,c,d,d] # {c:4,d:3,a:2,b:1} # 2. 常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; # 1题 arry_list =", "in arry_list: if str == ele: nums+=1 results[str] = nums def programFun2(arry_list): results2", "# [a,a,c,b,d,c,c,c,d,d] # {c:4,d:3,a:2,b:1} # 2. 常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; # 1题 arry_list = [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def", "count = 1 for i in arry_list: if i not in results2.keys: results2[i]", "results2 = {} count = 1 for i in arry_list: if i not", "EG: # [a,a,c,b,d,c,c,c,d,d] # {c:4,d:3,a:2,b:1} # 2. 
常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; # 1题 arry_list = [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"]", "arry_list: if str == ele: nums+=1 results[str] = nums def programFun2(arry_list): results2 =", "ele in arry_list: if str == ele: nums+=1 results[str] = nums def programFun2(arry_list):", "-ef | grep # sed -i \"1,2p\" nohup.log # grep # tail -f", "1题 arry_list = [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def programFun(arry_list): set_result = set(arry_list) results = {} for", "1 for i in arry_list: if i not in results2.keys: results2[i] = 1", "# 2题 # ps -ef | grep # sed -i \"1,2p\" nohup.log #", "# 1题 arry_list = [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def programFun(arry_list): set_result = set(arry_list) results = {}", "cp # ssh # scp # 查看日志 # tail -f nohup.out # tail", "results2.keys: results2[i] = 1 else: results2[i] = int(results2[i])+1 programFun2(arry_list) # 写在白纸上,完成之后拍照 # 2题", "1 else: results2[i] = int(results2[i])+1 programFun2(arry_list) # 写在白纸上,完成之后拍照 # 2题 # ps -ef", "常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; # 1题 arry_list = [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def programFun(arry_list): set_result = set(arry_list) results =", "netstat # top # mv # cp # ssh # scp # 查看日志", "= nums def programFun2(arry_list): results2 = {} count = 1 for i in", "for i in arry_list: if i not in results2.keys: results2[i] = 1 else:", "{} for str in set_result: #strs = [] nums = 0 for ele", "1. 给一个数组,统计每个字母出现次数,并按照出现次数排序(不限制语言); # EG: # [a,a,c,b,d,c,c,c,d,d] # {c:4,d:3,a:2,b:1} # 2. 常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; # 1题", "if i not in results2.keys: results2[i] = 1 else: results2[i] = int(results2[i])+1 programFun2(arry_list)", "results[str] = nums def programFun2(arry_list): results2 = {} count = 1 for i", "ps -ef | grep # sed -i \"1,2p\" nohup.log # grep # tail", "#strs = [] nums = 0 for ele in arry_list: if str ==", "== ele: nums+=1 results[str] = nums def programFun2(arry_list): results2 = {} count =", "scp # 查看日志 # tail -f nohup.out # tail -f nohup.log |grep \"ERROR\"", "grep # tail -f # netstat # top # mv # cp #", "nums = 0 for ele in arry_list: if str == ele: nums+=1 results[str]", "i not in results2.keys: results2[i] = 1 else: results2[i] = int(results2[i])+1 programFun2(arry_list) #", "= [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def programFun(arry_list): set_result = set(arry_list) results = {} for str in", "str in set_result: #strs = [] nums = 0 for ele in arry_list:", "= 1 for i in arry_list: if i not in results2.keys: results2[i] =", "= set(arry_list) results = {} for str in set_result: #strs = [] nums", "= {} count = 1 for i in arry_list: if i not in", "[\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def programFun(arry_list): set_result = set(arry_list) results = {} for str in set_result:", "programFun2(arry_list) # 写在白纸上,完成之后拍照 # 2题 # ps -ef | grep # sed -i", "# 2. 
常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; # 1题 arry_list = [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def programFun(arry_list): set_result = set(arry_list)", "ssh # scp # 查看日志 # tail -f nohup.out # tail -f nohup.log", "= 1 else: results2[i] = int(results2[i])+1 programFun2(arry_list) # 写在白纸上,完成之后拍照 # 2题 # ps", "= 0 for ele in arry_list: if str == ele: nums+=1 results[str] =", "else: results2[i] = int(results2[i])+1 programFun2(arry_list) # 写在白纸上,完成之后拍照 # 2题 # ps -ef |", "= {} for str in set_result: #strs = [] nums = 0 for", "set_result: #strs = [] nums = 0 for ele in arry_list: if str", "ele: nums+=1 results[str] = nums def programFun2(arry_list): results2 = {} count = 1", "# scp # 查看日志 # tail -f nohup.out # tail -f nohup.log |grep", "def programFun2(arry_list): results2 = {} count = 1 for i in arry_list: if", "# tail -f nohup.out # tail -f nohup.log |grep \"ERROR\" | wc 忘记了", "= int(results2[i])+1 programFun2(arry_list) # 写在白纸上,完成之后拍照 # 2题 # ps -ef | grep #", "= [] nums = 0 for ele in arry_list: if str == ele:", "# ssh # scp # 查看日志 # tail -f nohup.out # tail -f", "查看日志 # tail -f nohup.out # tail -f nohup.log |grep \"ERROR\" | wc", "{c:4,d:3,a:2,b:1} # 2. 常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; # 1题 arry_list = [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def programFun(arry_list): set_result =", "# sed -i \"1,2p\" nohup.log # grep # tail -f # netstat #", "给一个数组,统计每个字母出现次数,并按照出现次数排序(不限制语言); # EG: # [a,a,c,b,d,c,c,c,d,d] # {c:4,d:3,a:2,b:1} # 2. 常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; # 1题 arry_list", "in results2.keys: results2[i] = 1 else: results2[i] = int(results2[i])+1 programFun2(arry_list) # 写在白纸上,完成之后拍照 #", "-i \"1,2p\" nohup.log # grep # tail -f # netstat # top #", "# cp # ssh # scp # 查看日志 # tail -f nohup.out #", "| grep # sed -i \"1,2p\" nohup.log # grep # tail -f #", "set_result = set(arry_list) results = {} for str in set_result: #strs = []", "i in arry_list: if i not in results2.keys: results2[i] = 1 else: results2[i]", "nums+=1 results[str] = nums def programFun2(arry_list): results2 = {} count = 1 for", "in arry_list: if i not in results2.keys: results2[i] = 1 else: results2[i] =", "top # mv # cp # ssh # scp # 查看日志 # tail", "# netstat # top # mv # cp # ssh # scp #", "# ps -ef | grep # sed -i \"1,2p\" nohup.log # grep #", "0 for ele in arry_list: if str == ele: nums+=1 results[str] = nums", "# 1. 给一个数组,统计每个字母出现次数,并按照出现次数排序(不限制语言); # EG: # [a,a,c,b,d,c,c,c,d,d] # {c:4,d:3,a:2,b:1} # 2. 常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数; #", "arry_list = [\"a\",\"a\",\"c\",\"b\",\"d\",\"c\",\"c\",\"c\",\"d\",\"d\"] def programFun(arry_list): set_result = set(arry_list) results = {} for str", "nohup.log # grep # tail -f # netstat # top # mv #", "tail -f # netstat # top # mv # cp # ssh #", "<filename>1.py # 1. 给一个数组,统计每个字母出现次数,并按照出现次数排序(不限制语言); # EG: # [a,a,c,b,d,c,c,c,d,d] # {c:4,d:3,a:2,b:1} # 2. 常用的Linux命令是什么,如何查看日志?使用shell命令过滤出日志中的ERROR出现的次数;", "set(arry_list) results = {} for str in set_result: #strs = [] nums =", "not in results2.keys: results2[i] = 1 else: results2[i] = int(results2[i])+1 programFun2(arry_list) # 写在白纸上,完成之后拍照", "# 查看日志 # tail -f nohup.out # tail -f nohup.log |grep \"ERROR\" |", "# 写在白纸上,完成之后拍照 # 2题 # ps -ef | grep # sed -i \"1,2p\"", "if str == ele: nums+=1 results[str] = nums def programFun2(arry_list): results2 = {}", "mv # cp # ssh # scp # 查看日志 # tail -f nohup.out" ]
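# A tighter answer to question 1, as a sketch: collections.Counter does the
# counting, and Counter.most_common() returns (item, count) pairs already
# sorted by descending count, matching the {c:4,d:3,a:2,b:1} example above.
from collections import Counter


def letter_counts_sorted(items):
    """Count each letter and return a dict ordered by descending count."""
    return dict(Counter(items).most_common())


print(letter_counts_sorted(["a", "a", "c", "b", "d", "c", "c", "c", "d", "d"]))
# -> {'c': 4, 'd': 3, 'a': 2, 'b': 1}

# For question 2, the forgotten pipeline was likely one of these (my guess,
# not recovered from the original): `grep -c "ERROR" nohup.log` counts
# matching lines, while `grep -o "ERROR" nohup.log | wc -l` counts every
# occurrence.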
[ "index.html\") write_index_html(latest_release) print(\"Write versions.json\") write_versions_json(versions_data, outfile='versions.json') print(\"DONE post-processing\") if __name__ == \"__main__\": main()", "latest_release = versions_data['latest_release'] if latest_release is None: latest_release = 'master' print(\"Write index.html\") write_index_html(latest_release)", "len(label) > 0: downloads.append((label, str(filename))) return downloads def main(): \"\"\"Main function.\"\"\" print(\"Post-processing documentation", "= [] for filename in Path(folder).glob(r'download/*'): label = \"\".join(filename.suffixes).replace('.', '').lower() if len(label) >", "out_fh.write(INDEX_HTML.format(default_branch=default_branch)) subprocess.run(['git', 'add', 'index.html'], check=True) def find_downloads(folder): \"\"\"Find files in the 'download' subfolder", "'').lower() if len(label) > 0: downloads.append((label, str(filename))) return downloads def main(): \"\"\"Main function.\"\"\"", "\"\"\"Main function.\"\"\" print(\"Post-processing documentation on gh-pages\") print(\"Gather versions info\") versions_data = get_versions_data(find_downloads=find_downloads) latest_release", "'master' print(\"Write index.html\") write_index_html(latest_release) print(\"Write versions.json\") write_versions_json(versions_data, outfile='versions.json') print(\"DONE post-processing\") if __name__ ==", "from versions import get_versions_data, write_versions_json INDEX_HTML = r'''<!DOCTYPE html> <html> <head> <meta http-equiv=\"Refresh\"", "<p>Got to <a href=\"{default_branch}\">default documentation</a>.</p> </body> </html> ''' def write_index_html(default_branch): \"\"\"Write an index.html", "index.html that redirects to the DEFAULT_BRANCH.\"\"\" with open(\"index.html\", \"w\") as out_fh: out_fh.write(INDEX_HTML.format(default_branch=default_branch)) subprocess.run(['git',", "files in the 'download' subfolder of the given `folder`.\"\"\" downloads = [] for", "is None: latest_release = 'master' print(\"Write index.html\") write_index_html(latest_release) print(\"Write versions.json\") write_versions_json(versions_data, outfile='versions.json') print(\"DONE", "\"\"\"Find files in the 'download' subfolder of the given `folder`.\"\"\" downloads = []", "get_versions_data(find_downloads=find_downloads) latest_release = versions_data['latest_release'] if latest_release is None: latest_release = 'master' print(\"Write index.html\")", "import Path import subprocess from versions import get_versions_data, write_versions_json INDEX_HTML = r'''<!DOCTYPE html>", "python from pathlib import Path import subprocess from versions import get_versions_data, write_versions_json INDEX_HTML", "</body> </html> ''' def write_index_html(default_branch): \"\"\"Write an index.html that redirects to the DEFAULT_BRANCH.\"\"\"", "/> </head> <body> <p>Got to <a href=\"{default_branch}\">default documentation</a>.</p> </body> </html> ''' def write_index_html(default_branch):", "> 0: downloads.append((label, str(filename))) return downloads def main(): \"\"\"Main function.\"\"\" print(\"Post-processing documentation on", "the DEFAULT_BRANCH.\"\"\" with open(\"index.html\", \"w\") as out_fh: out_fh.write(INDEX_HTML.format(default_branch=default_branch)) subprocess.run(['git', 'add', 'index.html'], check=True) def", "import get_versions_data, write_versions_json INDEX_HTML = r'''<!DOCTYPE html> <html> <head> <meta http-equiv=\"Refresh\" content=\"0; url={default_branch}\"", "html> <html> <head> <meta http-equiv=\"Refresh\" content=\"0; 
url={default_branch}\" /> </head> <body> <p>Got to <a", "#!/usr/bin/env python from pathlib import Path import subprocess from versions import get_versions_data, write_versions_json", "an index.html that redirects to the DEFAULT_BRANCH.\"\"\" with open(\"index.html\", \"w\") as out_fh: out_fh.write(INDEX_HTML.format(default_branch=default_branch))", "DEFAULT_BRANCH.\"\"\" with open(\"index.html\", \"w\") as out_fh: out_fh.write(INDEX_HTML.format(default_branch=default_branch)) subprocess.run(['git', 'add', 'index.html'], check=True) def find_downloads(folder):", "latest_release is None: latest_release = 'master' print(\"Write index.html\") write_index_html(latest_release) print(\"Write versions.json\") write_versions_json(versions_data, outfile='versions.json')", "def write_index_html(default_branch): \"\"\"Write an index.html that redirects to the DEFAULT_BRANCH.\"\"\" with open(\"index.html\", \"w\")", "<a href=\"{default_branch}\">default documentation</a>.</p> </body> </html> ''' def write_index_html(default_branch): \"\"\"Write an index.html that redirects", "'add', 'index.html'], check=True) def find_downloads(folder): \"\"\"Find files in the 'download' subfolder of the", "that redirects to the DEFAULT_BRANCH.\"\"\" with open(\"index.html\", \"w\") as out_fh: out_fh.write(INDEX_HTML.format(default_branch=default_branch)) subprocess.run(['git', 'add',", "the 'download' subfolder of the given `folder`.\"\"\" downloads = [] for filename in", "write_versions_json INDEX_HTML = r'''<!DOCTYPE html> <html> <head> <meta http-equiv=\"Refresh\" content=\"0; url={default_branch}\" /> </head>", "subprocess.run(['git', 'add', 'index.html'], check=True) def find_downloads(folder): \"\"\"Find files in the 'download' subfolder of", "out_fh: out_fh.write(INDEX_HTML.format(default_branch=default_branch)) subprocess.run(['git', 'add', 'index.html'], check=True) def find_downloads(folder): \"\"\"Find files in the 'download'", "function.\"\"\" print(\"Post-processing documentation on gh-pages\") print(\"Gather versions info\") versions_data = get_versions_data(find_downloads=find_downloads) latest_release =", "content=\"0; url={default_branch}\" /> </head> <body> <p>Got to <a href=\"{default_branch}\">default documentation</a>.</p> </body> </html> '''", "def find_downloads(folder): \"\"\"Find files in the 'download' subfolder of the given `folder`.\"\"\" downloads", "if len(label) > 0: downloads.append((label, str(filename))) return downloads def main(): \"\"\"Main function.\"\"\" print(\"Post-processing", "'index.html'], check=True) def find_downloads(folder): \"\"\"Find files in the 'download' subfolder of the given", "label = \"\".join(filename.suffixes).replace('.', '').lower() if len(label) > 0: downloads.append((label, str(filename))) return downloads def", "open(\"index.html\", \"w\") as out_fh: out_fh.write(INDEX_HTML.format(default_branch=default_branch)) subprocess.run(['git', 'add', 'index.html'], check=True) def find_downloads(folder): \"\"\"Find files", "in Path(folder).glob(r'download/*'): label = \"\".join(filename.suffixes).replace('.', '').lower() if len(label) > 0: downloads.append((label, str(filename))) return", "url={default_branch}\" /> </head> <body> <p>Got to <a href=\"{default_branch}\">default documentation</a>.</p> </body> </html> ''' def", "None: latest_release = 'master' print(\"Write index.html\") write_index_html(latest_release) print(\"Write versions.json\") write_versions_json(versions_data, outfile='versions.json') print(\"DONE post-processing\")", "</html> ''' def 
#!/usr/bin/env python
# .travis/docs_post_process.py
from pathlib import Path
import subprocess

from versions import get_versions_data, write_versions_json

INDEX_HTML = r'''<!DOCTYPE html>
<html>
  <head>
    <meta http-equiv="Refresh" content="0; url={default_branch}" />
  </head>
  <body>
    <p>Go to <a href="{default_branch}">default documentation</a>.</p>
  </body>
</html>
'''


def write_index_html(default_branch):
    """Write an index.html that redirects to the DEFAULT_BRANCH."""
    with open("index.html", "w") as out_fh:
        out_fh.write(INDEX_HTML.format(default_branch=default_branch))
    subprocess.run(['git', 'add', 'index.html'], check=True)


def find_downloads(folder):
    """Find files in the 'download' subfolder of the given `folder`."""
    downloads = []
    for filename in Path(folder).glob(r'download/*'):
        label = "".join(filename.suffixes).replace('.', '').lower()
        if len(label) > 0:
            downloads.append((label, str(filename)))
    return downloads


def main():
    """Main function."""
    print("Post-processing documentation on gh-pages")
    print("Gather versions info")
    versions_data = get_versions_data(find_downloads=find_downloads)
    latest_release = versions_data['latest_release']
    if latest_release is None:
        latest_release = 'master'
    print("Write index.html")
    write_index_html(latest_release)
    print("Write versions.json")
    write_versions_json(versions_data, outfile='versions.json')
    print("DONE post-processing")


if __name__ == "__main__":
    main()
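# A quick illustration (not part of the script above) of what find_downloads()
# returns: a list of (label, path) tuples, where the label is built from the
# file's suffixes with the dots stripped. The folder layout is hypothetical.
#
#   find_downloads('v1.0')
#   # -> [('pdf', 'v1.0/download/docs.pdf'), ('targz', 'v1.0/download/docs.tar.gz')]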
[ "<reponame>baldingwizard/thermostatpi<gh_stars>1-10 from subprocess import call import time image=100000 while True: call([\"cp\", \"ramdisk/ui.png\", \"ramdisk/ui_\"+str(image)+\".png\"])", "subprocess import call import time image=100000 while True: call([\"cp\", \"ramdisk/ui.png\", \"ramdisk/ui_\"+str(image)+\".png\"]) time.sleep(0.2) image", "call import time image=100000 while True: call([\"cp\", \"ramdisk/ui.png\", \"ramdisk/ui_\"+str(image)+\".png\"]) time.sleep(0.2) image = image", "from subprocess import call import time image=100000 while True: call([\"cp\", \"ramdisk/ui.png\", \"ramdisk/ui_\"+str(image)+\".png\"]) time.sleep(0.2)", "time image=100000 while True: call([\"cp\", \"ramdisk/ui.png\", \"ramdisk/ui_\"+str(image)+\".png\"]) time.sleep(0.2) image = image + 1", "import time image=100000 while True: call([\"cp\", \"ramdisk/ui.png\", \"ramdisk/ui_\"+str(image)+\".png\"]) time.sleep(0.2) image = image +", "import call import time image=100000 while True: call([\"cp\", \"ramdisk/ui.png\", \"ramdisk/ui_\"+str(image)+\".png\"]) time.sleep(0.2) image =" ]
[ "for k1 in [278]: #[0]: for k2 in xrange(8, 240, 30): #[0]: f_list", "(Hz)') plt.title('Running average of the firing rate, N_r = %d' % (N_r)) plt.show()", "np.zeros(gids.size) f_rate = data[:, 1] f_rate_mean = f_rate_mean + f_rate plt.plot(gids, f_rate) f_rate_mean", "f_rate_mean = np.array([]) for f_name in f_list: print 'Processing data from file %s'", "data[:, 1] f_rate_mean = f_rate_mean + f_rate plt.plot(gids, f_rate) f_rate_mean = f_rate_mean /", "= np.array([]) for f_name in f_list: print 'Processing data from file %s' %", "/ (1.0 * len(f_list)) plt.plot(gids, f_rate_mean, '-o', linewidth=3) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)')", "[] for i in xrange(0, 5): #xrange(0, 10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2, i, k1))", "grating_id_start in [7, 8]: # for grating_id in xrange(grating_id_start, 240, 30): # f_list", "data[:, 0] f_rate_mean = np.zeros(gids.size) f_rate = data[:, 1] f_rate_mean = f_rate_mean +", "data from file %s' % (f_name) data = np.genfromtxt(f_name, delimiter=' ') if (gids.size", "(1.0 * N_r) plt.plot(gids[(N_r-1):], f_rate_mean_r_av) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') plt.title('Running average of", "0.1)) plt.xlabel('Firing rate (Hz)') plt.ylabel('Number of cells') plt.title('Distribution of firing rates over cells')", "cells') plt.title('Distribution of firing rates over cells') plt.show() # Get a running average", "1] f_rate_mean = f_rate_mean + f_rate plt.plot(gids, f_rate) f_rate_mean = f_rate_mean / (1.0", "plt.ylabel('Firing rate (Hz)') plt.title('Running average of the firing rate, N_r = %d' %", "for i in xrange(0, 5): #xrange(0, 10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2, i, k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat'", "f_rate_mean = f_rate_mean / (1.0 * len(f_list)) plt.plot(gids, f_rate_mean, '-o', linewidth=3) plt.ylim(bottom=0.0) plt.xlabel('gid')", "in f_list: print 'Processing data from file %s' % (f_name) data = np.genfromtxt(f_name,", "f_rate_mean / (1.0 * len(f_list)) plt.plot(gids, f_rate_mean, '-o', linewidth=3) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate", "0, 0)) f_rate_mean_r_av = (cumsum[N_r:] - cumsum[:-N_r]) / (1.0 * N_r) plt.plot(gids[(N_r-1):], f_rate_mean_r_av)", "np.genfromtxt(f_name, delimiter=' ') if (gids.size == 0): gids = data[:, 0] f_rate_mean =", "= np.genfromtxt(f_name, delimiter=' ') if (gids.size == 0): gids = data[:, 0] f_rate_mean", "= f_rate_mean + f_rate plt.plot(gids, f_rate) f_rate_mean = f_rate_mean / (1.0 * len(f_list))", "as plt #for grating_id_start in [7, 8]: # for grating_id in xrange(grating_id_start, 240,", "in xrange(0, 5): #xrange(0, 10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2, i, k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i))", "#[0]: for k2 in xrange(8, 240, 30): #[0]: f_list = [] for i", "a solution from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, # under \"Efficient solution\". 
N_r = 100 cumsum =", "== 0): gids = data[:, 0] f_rate_mean = np.zeros(gids.size) f_rate = data[:, 1]", "cumsum = np.cumsum(np.insert(f_rate_mean, 0, 0)) f_rate_mean_r_av = (cumsum[N_r:] - cumsum[:-N_r]) / (1.0 *", "xrange(0, 5): #xrange(0, 10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2, i, k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i)) gids", "100 cumsum = np.cumsum(np.insert(f_rate_mean, 0, 0)) f_rate_mean_r_av = (cumsum[N_r:] - cumsum[:-N_r]) / (1.0", "+ f_rate plt.plot(gids, f_rate) f_rate_mean = f_rate_mean / (1.0 * len(f_list)) plt.plot(gids, f_rate_mean,", "f_rate_mean_r_av = (cumsum[N_r:] - cumsum[:-N_r]) / (1.0 * N_r) plt.plot(gids[(N_r-1):], f_rate_mean_r_av) plt.ylim(bottom=0.0) plt.xlabel('gid')", "matplotlib.pyplot as plt #for grating_id_start in [7, 8]: # for grating_id in xrange(grating_id_start,", "plt.ylabel('Number of cells') plt.title('Distribution of firing rates over cells') plt.show() # Get a", "#plt.legend() plt.show() plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0, 0.1)) plt.xlabel('Firing rate (Hz)') plt.ylabel('Number of cells') plt.title('Distribution", "240, 30): #[0]: f_list = [] for i in xrange(0, 5): #xrange(0, 10):", "solution\". N_r = 100 cumsum = np.cumsum(np.insert(f_rate_mean, 0, 0)) f_rate_mean_r_av = (cumsum[N_r:] -", "of cells') plt.title('Distribution of firing rates over cells') plt.show() # Get a running", "% (i)) gids = np.array([]) f_rate_mean = np.array([]) for f_name in f_list: print", "rate (Hz)') plt.title('Running average of the firing rate, N_r = %d' % (N_r))", "% (grating_id, i)) for k1 in [278]: #[0]: for k2 in xrange(8, 240,", "= 100 cumsum = np.cumsum(np.insert(f_rate_mean, 0, 0)) f_rate_mean_r_av = (cumsum[N_r:] - cumsum[:-N_r]) /", "(Hz)') plt.ylabel('Number of cells') plt.title('Distribution of firing rates over cells') plt.show() # Get", "plt.ylabel('Firing rate (Hz)') #plt.legend() plt.show() plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0, 0.1)) plt.xlabel('Firing rate (Hz)') plt.ylabel('Number", "= np.cumsum(np.insert(f_rate_mean, 0, 0)) f_rate_mean_r_av = (cumsum[N_r:] - cumsum[:-N_r]) / (1.0 * N_r)", "plt.plot(gids, f_rate) f_rate_mean = f_rate_mean / (1.0 * len(f_list)) plt.plot(gids, f_rate_mean, '-o', linewidth=3)", "f_name in f_list: print 'Processing data from file %s' % (f_name) data =", "[7, 8]: # for grating_id in xrange(grating_id_start, 240, 30): # f_list = []", "% (k2, i, k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i)) gids = np.array([]) f_rate_mean = np.array([])", "(Hz)') #plt.legend() plt.show() plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0, 0.1)) plt.xlabel('Firing rate (Hz)') plt.ylabel('Number of cells')", "xrange(8, 240, 30): #[0]: f_list = [] for i in xrange(0, 5): #xrange(0,", "# under \"Efficient solution\". N_r = 100 cumsum = np.cumsum(np.insert(f_rate_mean, 0, 0)) f_rate_mean_r_av", "plt.title('Distribution of firing rates over cells') plt.show() # Get a running average of", "plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') #plt.legend() plt.show() plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0, 0.1)) plt.xlabel('Firing rate (Hz)')", "plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') plt.title('Running average of the firing rate, N_r = %d'", "use a solution from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, # under \"Efficient solution\". 
N_r = 100 cumsum", "xrange(grating_id_start, 240, 30): # f_list = [] # for i in xrange(0, 10):", "0): gids = data[:, 0] f_rate_mean = np.zeros(gids.size) f_rate = data[:, 1] f_rate_mean", "f_rate_mean = f_rate_mean + f_rate plt.plot(gids, f_rate) f_rate_mean = f_rate_mean / (1.0 *", "under \"Efficient solution\". N_r = 100 cumsum = np.cumsum(np.insert(f_rate_mean, 0, 0)) f_rate_mean_r_av =", "over cells') plt.show() # Get a running average of f_rate_mean; here, we use", "# for i in xrange(0, 10): # f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id, i)) for k1", "% (f_name) data = np.genfromtxt(f_name, delimiter=' ') if (gids.size == 0): gids =", "data = np.genfromtxt(f_name, delimiter=' ') if (gids.size == 0): gids = data[:, 0]", "np.array([]) for f_name in f_list: print 'Processing data from file %s' % (f_name)", "f_rate_mean + f_rate plt.plot(gids, f_rate) f_rate_mean = f_rate_mean / (1.0 * len(f_list)) plt.plot(gids,", "grating_id in xrange(grating_id_start, 240, 30): # f_list = [] # for i in", "plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') plt.title('Running average of the firing rate, N_r =", "f_rate_mean_r_av) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') plt.title('Running average of the firing rate, N_r", "f_rate_mean, '-o', linewidth=3) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') #plt.legend() plt.show() plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0,", "N_r) plt.plot(gids[(N_r-1):], f_rate_mean_r_av) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') plt.title('Running average of the firing", "in [7, 8]: # for grating_id in xrange(grating_id_start, 240, 30): # f_list =", "- cumsum[:-N_r]) / (1.0 * N_r) plt.plot(gids[(N_r-1):], f_rate_mean_r_av) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)')", "N_r = 100 cumsum = np.cumsum(np.insert(f_rate_mean, 0, 0)) f_rate_mean_r_av = (cumsum[N_r:] - cumsum[:-N_r])", "if (gids.size == 0): gids = data[:, 0] f_rate_mean = np.zeros(gids.size) f_rate =", "i in xrange(0, 10): # f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id, i)) for k1 in [278]:", "k1 in [278]: #[0]: for k2 in xrange(8, 240, 30): #[0]: f_list =", "= np.array([]) f_rate_mean = np.array([]) for f_name in f_list: print 'Processing data from", "[278]: #[0]: for k2 in xrange(8, 240, 30): #[0]: f_list = [] for", "f_list = [] for i in xrange(0, 5): #xrange(0, 10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2,", "np import matplotlib.pyplot as plt #for grating_id_start in [7, 8]: # for grating_id", "np.array([]) f_rate_mean = np.array([]) for f_name in f_list: print 'Processing data from file", "[] # for i in xrange(0, 10): # f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id, i)) for", "= [] for i in xrange(0, 5): #xrange(0, 10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2, i,", "i in xrange(0, 5): #xrange(0, 10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2, i, k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' %", "for f_name in f_list: print 'Processing data from file %s' % (f_name) data", "in xrange(grating_id_start, 240, 30): # f_list = [] # for i in xrange(0,", "= data[:, 1] f_rate_mean = f_rate_mean + f_rate plt.plot(gids, f_rate) f_rate_mean = f_rate_mean", "for i in xrange(0, 10): # f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id, i)) for k1 in", "linewidth=3) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') #plt.legend() 
plt.show() plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0, 0.1)) plt.xlabel('Firing", "(cumsum[N_r:] - cumsum[:-N_r]) / (1.0 * N_r) plt.plot(gids[(N_r-1):], f_rate_mean_r_av) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate", "f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id, i)) for k1 in [278]: #[0]: for k2 in xrange(8,", "np.cumsum(np.insert(f_rate_mean, 0, 0)) f_rate_mean_r_av = (cumsum[N_r:] - cumsum[:-N_r]) / (1.0 * N_r) plt.plot(gids[(N_r-1):],", "#for grating_id_start in [7, 8]: # for grating_id in xrange(grating_id_start, 240, 30): #", "k2 in xrange(8, 240, 30): #[0]: f_list = [] for i in xrange(0,", "#[0]: f_list = [] for i in xrange(0, 5): #xrange(0, 10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' %", "0] f_rate_mean = np.zeros(gids.size) f_rate = data[:, 1] f_rate_mean = f_rate_mean + f_rate", "* len(f_list)) plt.plot(gids, f_rate_mean, '-o', linewidth=3) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') #plt.legend() plt.show()", "plt.plot(gids, f_rate_mean, '-o', linewidth=3) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') #plt.legend() plt.show() plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1,", "of f_rate_mean; here, we use a solution from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, # under \"Efficient solution\".", "f_list: print 'Processing data from file %s' % (f_name) data = np.genfromtxt(f_name, delimiter='", "cells') plt.show() # Get a running average of f_rate_mean; here, we use a", "http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, # under \"Efficient solution\". N_r = 100 cumsum = np.cumsum(np.insert(f_rate_mean, 0, 0))", "rates over cells') plt.show() # Get a running average of f_rate_mean; here, we", "f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2, i, k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i)) gids = np.array([]) f_rate_mean =", "rate (Hz)') plt.ylabel('Number of cells') plt.title('Distribution of firing rates over cells') plt.show() #", "= f_rate_mean / (1.0 * len(f_list)) plt.plot(gids, f_rate_mean, '-o', linewidth=3) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing", "plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0, 0.1)) plt.xlabel('Firing rate (Hz)') plt.ylabel('Number of cells') plt.title('Distribution of firing", "xrange(0, 10): # f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id, i)) for k1 in [278]: #[0]: for", "as np import matplotlib.pyplot as plt #for grating_id_start in [7, 8]: # for", "# for grating_id in xrange(grating_id_start, 240, 30): # f_list = [] # for", "plt.show() # Get a running average of f_rate_mean; here, we use a solution", "f_rate = data[:, 1] f_rate_mean = f_rate_mean + f_rate plt.plot(gids, f_rate) f_rate_mean =", "in xrange(0, 10): # f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id, i)) for k1 in [278]: #[0]:", "# Get a running average of f_rate_mean; here, we use a solution from", "gids = np.array([]) f_rate_mean = np.array([]) for f_name in f_list: print 'Processing data", "= np.zeros(gids.size) f_rate = data[:, 1] f_rate_mean = f_rate_mean + f_rate plt.plot(gids, f_rate)", "import numpy as np import matplotlib.pyplot as plt #for grating_id_start in [7, 8]:", "(f_name) data = np.genfromtxt(f_name, delimiter=' ') if (gids.size == 0): gids = data[:,", "we use a solution from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, # under \"Efficient solution\". 
N_r = 100", "= (cumsum[N_r:] - cumsum[:-N_r]) / (1.0 * N_r) plt.plot(gids[(N_r-1):], f_rate_mean_r_av) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing", "i)) for k1 in [278]: #[0]: for k2 in xrange(8, 240, 30): #[0]:", "') if (gids.size == 0): gids = data[:, 0] f_rate_mean = np.zeros(gids.size) f_rate", "* N_r) plt.plot(gids[(N_r-1):], f_rate_mean_r_av) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') plt.title('Running average of the", "plt.plot(gids[(N_r-1):], f_rate_mean_r_av) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') plt.title('Running average of the firing rate,", "plt.xlabel('Firing rate (Hz)') plt.ylabel('Number of cells') plt.title('Distribution of firing rates over cells') plt.show()", "from file %s' % (f_name) data = np.genfromtxt(f_name, delimiter=' ') if (gids.size ==", "running average of f_rate_mean; here, we use a solution from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, # under", "here, we use a solution from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, # under \"Efficient solution\". N_r =", "f_rate plt.plot(gids, f_rate) f_rate_mean = f_rate_mean / (1.0 * len(f_list)) plt.plot(gids, f_rate_mean, '-o',", "10): # f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id, i)) for k1 in [278]: #[0]: for k2", "plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') #plt.legend() plt.show() plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0, 0.1)) plt.xlabel('Firing rate", "of firing rates over cells') plt.show() # Get a running average of f_rate_mean;", "solution from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, # under \"Efficient solution\". N_r = 100 cumsum = np.cumsum(np.insert(f_rate_mean,", "30): # f_list = [] # for i in xrange(0, 10): # f_list.append('output_ll1_g%d_%d/tot_f_rate.dat'", "plt #for grating_id_start in [7, 8]: # for grating_id in xrange(grating_id_start, 240, 30):", "print 'Processing data from file %s' % (f_name) data = np.genfromtxt(f_name, delimiter=' ')", "for k2 in xrange(8, 240, 30): #[0]: f_list = [] for i in", "50.0, 0.1)) plt.xlabel('Firing rate (Hz)') plt.ylabel('Number of cells') plt.title('Distribution of firing rates over", "len(f_list)) plt.plot(gids, f_rate_mean, '-o', linewidth=3) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') #plt.legend() plt.show() plt.hist(f_rate_mean[0:8500],", "f_list = [] # for i in xrange(0, 10): # f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id,", "bins=np.arange(-0.1, 50.0, 0.1)) plt.xlabel('Firing rate (Hz)') plt.ylabel('Number of cells') plt.title('Distribution of firing rates", "firing rates over cells') plt.show() # Get a running average of f_rate_mean; here,", "#xrange(0, 10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2, i, k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i)) gids = np.array([])", "(1.0 * len(f_list)) plt.plot(gids, f_rate_mean, '-o', linewidth=3) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') #plt.legend()", "average of f_rate_mean; here, we use a solution from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, # under \"Efficient", "/ (1.0 * N_r) plt.plot(gids[(N_r-1):], f_rate_mean_r_av) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') plt.title('Running average", "a running average of f_rate_mean; here, we use a solution from 
http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, #", "(gids.size == 0): gids = data[:, 0] f_rate_mean = np.zeros(gids.size) f_rate = data[:,", "(i)) gids = np.array([]) f_rate_mean = np.array([]) for f_name in f_list: print 'Processing", "for grating_id in xrange(grating_id_start, 240, 30): # f_list = [] # for i", "30): #[0]: f_list = [] for i in xrange(0, 5): #xrange(0, 10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat'", "k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i)) gids = np.array([]) f_rate_mean = np.array([]) for f_name in", "from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, # under \"Efficient solution\". N_r = 100 cumsum = np.cumsum(np.insert(f_rate_mean, 0,", "\"Efficient solution\". N_r = 100 cumsum = np.cumsum(np.insert(f_rate_mean, 0, 0)) f_rate_mean_r_av = (cumsum[N_r:]", "file %s' % (f_name) data = np.genfromtxt(f_name, delimiter=' ') if (gids.size == 0):", "'Processing data from file %s' % (f_name) data = np.genfromtxt(f_name, delimiter=' ') if", "numpy as np import matplotlib.pyplot as plt #for grating_id_start in [7, 8]: #", "%s' % (f_name) data = np.genfromtxt(f_name, delimiter=' ') if (gids.size == 0): gids", "import matplotlib.pyplot as plt #for grating_id_start in [7, 8]: # for grating_id in", "'-o', linewidth=3) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') #plt.legend() plt.show() plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0, 0.1))", "= data[:, 0] f_rate_mean = np.zeros(gids.size) f_rate = data[:, 1] f_rate_mean = f_rate_mean", "cumsum[:-N_r]) / (1.0 * N_r) plt.plot(gids[(N_r-1):], f_rate_mean_r_av) plt.ylim(bottom=0.0) plt.xlabel('gid') plt.ylabel('Firing rate (Hz)') plt.title('Running", "in [278]: #[0]: for k2 in xrange(8, 240, 30): #[0]: f_list = []", "Get a running average of f_rate_mean; here, we use a solution from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean,", "gids = data[:, 0] f_rate_mean = np.zeros(gids.size) f_rate = data[:, 1] f_rate_mean =", "(grating_id, i)) for k1 in [278]: #[0]: for k2 in xrange(8, 240, 30):", "240, 30): # f_list = [] # for i in xrange(0, 10): #", "in xrange(8, 240, 30): #[0]: f_list = [] for i in xrange(0, 5):", "= [] # for i in xrange(0, 10): # f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id, i))", "f_rate_mean; here, we use a solution from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean, # under \"Efficient solution\". 
N_r", "# f_list = [] # for i in xrange(0, 10): # f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' %", "f_rate_mean = np.zeros(gids.size) f_rate = data[:, 1] f_rate_mean = f_rate_mean + f_rate plt.plot(gids,", "5): #xrange(0, 10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2, i, k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i)) gids =", "delimiter=' ') if (gids.size == 0): gids = data[:, 0] f_rate_mean = np.zeros(gids.size)", "8]: # for grating_id in xrange(grating_id_start, 240, 30): # f_list = [] #", "0)) f_rate_mean_r_av = (cumsum[N_r:] - cumsum[:-N_r]) / (1.0 * N_r) plt.plot(gids[(N_r-1):], f_rate_mean_r_av) plt.ylim(bottom=0.0)", "# f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id, i)) for k1 in [278]: #[0]: for k2 in", "10): f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2, i, k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i)) gids = np.array([]) f_rate_mean", "(k2, i, k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i)) gids = np.array([]) f_rate_mean = np.array([]) for", "rate (Hz)') #plt.legend() plt.show() plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0, 0.1)) plt.xlabel('Firing rate (Hz)') plt.ylabel('Number of", "i, k1)) #f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i)) gids = np.array([]) f_rate_mean = np.array([]) for f_name", "#f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i)) gids = np.array([]) f_rate_mean = np.array([]) for f_name in f_list:", "plt.show() plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0, 0.1)) plt.xlabel('Firing rate (Hz)') plt.ylabel('Number of cells') plt.title('Distribution of", "f_rate) f_rate_mean = f_rate_mean / (1.0 * len(f_list)) plt.plot(gids, f_rate_mean, '-o', linewidth=3) plt.ylim(bottom=0.0)" ]
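# A small self-contained check of the cumulative-sum running average used
# above: for a window of size N_r, (cumsum[N_r:] - cumsum[:-N_r]) / N_r is the
# mean of each length-N_r window. Toy data with N_r = 3, verified against
# np.convolve.
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
N_r = 3
cumsum = np.cumsum(np.insert(x, 0, 0))
running = (cumsum[N_r:] - cumsum[:-N_r]) / (1.0 * N_r)
print(running)                                       # [2. 3. 4.]
print(np.convolve(x, np.ones(N_r) / N_r, 'valid'))   # same result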
[ "<filename>fancy_todo_list/application/entities/__init__.py from .base import RefId from .user import Username, User # from .task", "import RefId from .user import Username, User # from .task import Title, Task", "from .base import RefId from .user import Username, User # from .task import", ".base import RefId from .user import Username, User # from .task import Title," ]
[ "= Sheep(position=Position(-10, -10)) for i in range(0,10): print('-- Iteration {0} --'.format(i)) grass.move() print(grass)", "Sheep(position=Position(-10, -10)) for i in range(0,10): print('-- Iteration {0} --'.format(i)) grass.move() print(grass) sheep.move()", "'__main__': grass = Grass(position=Position(10, 10)) sheep = Sheep(position=Position(-10, -10)) for i in range(0,10):", "Position from Sheep import Sheep from Grass import Grass if __name__ == '__main__':", "-10)) for i in range(0,10): print('-- Iteration {0} --'.format(i)) grass.move() print(grass) sheep.move() print(sheep)", "Sheep import Sheep from Grass import Grass if __name__ == '__main__': grass =", "__name__ == '__main__': grass = Grass(position=Position(10, 10)) sheep = Sheep(position=Position(-10, -10)) for i", "== '__main__': grass = Grass(position=Position(10, 10)) sheep = Sheep(position=Position(-10, -10)) for i in", "from Position import Position from Sheep import Sheep from Grass import Grass if", "= Grass(position=Position(10, 10)) sheep = Sheep(position=Position(-10, -10)) for i in range(0,10): print('-- Iteration", "if __name__ == '__main__': grass = Grass(position=Position(10, 10)) sheep = Sheep(position=Position(-10, -10)) for", "grass = Grass(position=Position(10, 10)) sheep = Sheep(position=Position(-10, -10)) for i in range(0,10): print('--", "import Sheep from Grass import Grass if __name__ == '__main__': grass = Grass(position=Position(10,", "import Position from Sheep import Sheep from Grass import Grass if __name__ ==", "from Grass import Grass if __name__ == '__main__': grass = Grass(position=Position(10, 10)) sheep", "Grass(position=Position(10, 10)) sheep = Sheep(position=Position(-10, -10)) for i in range(0,10): print('-- Iteration {0}", "sheep = Sheep(position=Position(-10, -10)) for i in range(0,10): print('-- Iteration {0} --'.format(i)) grass.move()", "from Sheep import Sheep from Grass import Grass if __name__ == '__main__': grass", "Sheep from Grass import Grass if __name__ == '__main__': grass = Grass(position=Position(10, 10))", "Grass import Grass if __name__ == '__main__': grass = Grass(position=Position(10, 10)) sheep =", "import Grass if __name__ == '__main__': grass = Grass(position=Position(10, 10)) sheep = Sheep(position=Position(-10,", "Grass if __name__ == '__main__': grass = Grass(position=Position(10, 10)) sheep = Sheep(position=Position(-10, -10))", "Position import Position from Sheep import Sheep from Grass import Grass if __name__", "10)) sheep = Sheep(position=Position(-10, -10)) for i in range(0,10): print('-- Iteration {0} --'.format(i))" ]
[ "1: # instance = qs.first() # else: # raise Http404(\"Product doesn't exist.\") instance", "= cart_obj return context # def get_object(self, *args, **kwargs): # request = self.request", "context = super(ProductListView, self).get_context_data(*args, **kwargs) cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj", "django.shortcuts import render, get_object_or_404 from analytics.mixins import ObjectViewedMixin from carts.models import Cart from", "qs.count == 1: # instance = qs.first() # else: # raise Http404(\"Product doesn't", "# request = self.request # slug = self.kwargs.get('slug') # try: # instance =", "def get_context_data(self,*args, **kwargs): context = super(ProductDetailSlugView, self).get_context_data(*args, **kwargs) cart_obj , new_obj = Cart.objects.new_or_get(self.request)", "raise Http404(\"Product doesn't exist.\") instance = Product.objects.get_by_id(pk) if instance is None: raise Http404(\"Product", "qs.first() # except: # raise Http404(\"Uhmm\") # return instance def product_detail_view(request, pk): #", "= Product.objects.all() context = { 'object_list': queryset } return render(request, \"products/list.html\", context) class", "context = super(ProductDetailSlugView, self).get_context_data(*args, **kwargs) cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj", "DetailView): queryset = Product.objects.featured() template_name = \"products/featured-detail.html\" class ProductListView(ListView): queryset = Product.objects.all() template_name", "if instance is None: raise Http404(\"Product doesn't exist.\") context = { 'object': instance", "return context def product_list_view(request): queryset = Product.objects.all() context = { 'object_list': queryset }", "# try: # instance = Product.objects.get(slug=slug, active=True) # except Product.DoesNotExist: # raise Http404(\"Not", "# instance = qs.first() # else: # raise Http404(\"Product doesn't exist.\") instance =", "\"products/list.html\", context) class ProductDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\" class ProductDetailSlugView(ObjectViewedMixin,", "*args, **kwargs): request = self.request return Product.objects.featured() class ProductFeaturedDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.featured()", "instance = Product.objects.get_by_id(pk) if instance is None: raise Http404(\"Product doesn't exist.\") context =", "Product.objects.get_by_id(pk) if instance is None: raise Http404(\"Product doesn't exist.\") context = { 'object':", "Http404(\"Uhmm\") # return instance def product_detail_view(request, pk): # instance = get_object_or_404(Product, pk=pk) #", "except: # print(\"Huh?\") # # qs = Product.objects.filter(pk=pk) # if qs.exists() and qs.count", "# except Product.DoesNotExist: # print('no product here') # raise Http404(\"Product doesn't exist.\") #", "def get_queryset(self, *args, **kwargs): request = self.request return Product.objects.featured() class ProductFeaturedDetailView(ObjectViewedMixin, DetailView): queryset", "Http404(\"Product doesn't exist.\") context = { 'object': instance } return render(request, \"products/detail.html\", context)", "product_detail_view(request, pk): # instance = get_object_or_404(Product, pk=pk) # try: # instance = Product.objects.filter(pk=pk)", "{ 'object_list': queryset } return render(request, \"products/list.html\", context) class ProductDetailView(ObjectViewedMixin, DetailView): queryset =", "new_obj = 
Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context def product_list_view(request): queryset = Product.objects.all()", "get_context_data(self,*args, **kwargs): context = super(ProductListView, self).get_context_data(*args, **kwargs) cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart']", "= \"products/list.html\" def get_context_data(self,*args, **kwargs): context = super(ProductListView, self).get_context_data(*args, **kwargs) cart_obj , new_obj", "import Product from django.http import Http404 class ProductFeaturedListView(ListView): template_name = \"products/list.html\" def get_queryset(self,", "active=True) # except Product.DoesNotExist: # raise Http404(\"Not Found..\") # except Product.MultipleObjectsReturned: # qs", "# except Product.DoesNotExist: # raise Http404(\"Not Found..\") # except Product.MultipleObjectsReturned: # qs =", "pk): # instance = get_object_or_404(Product, pk=pk) # try: # instance = Product.objects.filter(pk=pk) #", "= Product.objects.filter(pk=pk) # if qs.exists() and qs.count == 1: # instance = qs.first()", "context # def get_object(self, *args, **kwargs): # request = self.request # slug =", "template_name = \"products/list.html\" def get_context_data(self,*args, **kwargs): context = super(ProductListView, self).get_context_data(*args, **kwargs) cart_obj ,", "cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context def product_list_view(request): queryset", "ProductFeaturedDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.featured() template_name = \"products/featured-detail.html\" class ProductListView(ListView): queryset = Product.objects.all()", "= \"products/detail.html\" class ProductDetailSlugView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\" def get_context_data(self,*args,", "analytics.mixins import ObjectViewedMixin from carts.models import Cart from .models import Product from django.http", "= self.request return Product.objects.featured() class ProductFeaturedDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.featured() template_name = \"products/featured-detail.html\"", "from django.views.generic import ListView, DetailView from django.shortcuts import render, get_object_or_404 from analytics.mixins import", "from analytics.mixins import ObjectViewedMixin from carts.models import Cart from .models import Product from", "slug = self.kwargs.get('slug') # try: # instance = Product.objects.get(slug=slug, active=True) # except Product.DoesNotExist:", "instance = Product.objects.filter(pk=pk) # except Product.DoesNotExist: # print('no product here') # raise Http404(\"Product", "Product.DoesNotExist: # print('no product here') # raise Http404(\"Product doesn't exist.\") # except: #", "*args, **kwargs): # request = self.request # slug = self.kwargs.get('slug') # try: #", "print(\"Huh?\") # # qs = Product.objects.filter(pk=pk) # if qs.exists() and qs.count == 1:", "Product.objects.featured() class ProductFeaturedDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.featured() template_name = \"products/featured-detail.html\" class ProductListView(ListView): queryset", "try: # instance = Product.objects.filter(pk=pk) # except Product.DoesNotExist: # print('no product here') #", "product here') # raise Http404(\"Product doesn't exist.\") # except: # print(\"Huh?\") # #", "\"products/list.html\" def get_queryset(self, *args, **kwargs): request = self.request 
return Product.objects.featured() class ProductFeaturedDetailView(ObjectViewedMixin, DetailView):", "# if qs.exists() and qs.count == 1: # instance = qs.first() # else:", "# print(\"Huh?\") # # qs = Product.objects.filter(pk=pk) # if qs.exists() and qs.count ==", "instance = qs.first() # except: # raise Http404(\"Uhmm\") # return instance def product_detail_view(request,", "django.http import Http404 class ProductFeaturedListView(ListView): template_name = \"products/list.html\" def get_queryset(self, *args, **kwargs): request", "except Product.DoesNotExist: # print('no product here') # raise Http404(\"Product doesn't exist.\") # except:", "} return render(request, \"products/list.html\", context) class ProductDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name =", "except Product.DoesNotExist: # raise Http404(\"Not Found..\") # except Product.MultipleObjectsReturned: # qs = Product.objects.get(slug=slug,", "= self.kwargs.get('slug') # try: # instance = Product.objects.get(slug=slug, active=True) # except Product.DoesNotExist: #", "template_name = \"products/list.html\" def get_queryset(self, *args, **kwargs): request = self.request return Product.objects.featured() class", "# try: # instance = Product.objects.filter(pk=pk) # except Product.DoesNotExist: # print('no product here')", "# instance = get_object_or_404(Product, pk=pk) # try: # instance = Product.objects.filter(pk=pk) # except", "qs.exists() and qs.count == 1: # instance = qs.first() # else: # raise", "doesn't exist.\") instance = Product.objects.get_by_id(pk) if instance is None: raise Http404(\"Product doesn't exist.\")", "template_name = \"products/detail.html\" def get_context_data(self,*args, **kwargs): context = super(ProductDetailSlugView, self).get_context_data(*args, **kwargs) cart_obj ,", "Product.objects.featured() template_name = \"products/featured-detail.html\" class ProductListView(ListView): queryset = Product.objects.all() template_name = \"products/list.html\" def", "print('no product here') # raise Http404(\"Product doesn't exist.\") # except: # print(\"Huh?\") #", "here') # raise Http404(\"Product doesn't exist.\") # except: # print(\"Huh?\") # # qs", "get_object_or_404 from analytics.mixins import ObjectViewedMixin from carts.models import Cart from .models import Product", "= Product.objects.featured() template_name = \"products/featured-detail.html\" class ProductListView(ListView): queryset = Product.objects.all() template_name = \"products/list.html\"", "import ListView, DetailView from django.shortcuts import render, get_object_or_404 from analytics.mixins import ObjectViewedMixin from", ", new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context def product_list_view(request): queryset =", "\"products/detail.html\" class ProductDetailSlugView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\" def get_context_data(self,*args, **kwargs):", "# raise Http404(\"Uhmm\") # return instance def product_detail_view(request, pk): # instance = get_object_or_404(Product,", "context def product_list_view(request): queryset = Product.objects.all() context = { 'object_list': queryset } return", "pk=pk) # try: # instance = Product.objects.filter(pk=pk) # except Product.DoesNotExist: # print('no product", "# print('no product here') # raise Http404(\"Product doesn't exist.\") # except: # print(\"Huh?\")", "Product.objects.filter(pk=pk) # if qs.exists() and qs.count == 1: # instance = qs.first() 
#", "is None: raise Http404(\"Product doesn't exist.\") context = { 'object': instance } return", "from carts.models import Cart from .models import Product from django.http import Http404 class", "self).get_context_data(*args, **kwargs) cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context #", "template_name = \"products/detail.html\" class ProductDetailSlugView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\" def", "= Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context def product_list_view(request): queryset = Product.objects.all() context", "'object_list': queryset } return render(request, \"products/list.html\", context) class ProductDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.all()", "def product_list_view(request): queryset = Product.objects.all() context = { 'object_list': queryset } return render(request,", "DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\" class ProductDetailSlugView(ObjectViewedMixin, DetailView): queryset = Product.objects.all()", "= super(ProductDetailSlugView, self).get_context_data(*args, **kwargs) cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return", "= Product.objects.get(slug=slug, active=True) # instance = qs.first() # except: # raise Http404(\"Uhmm\") #", "doesn't exist.\") # except: # print(\"Huh?\") # # qs = Product.objects.filter(pk=pk) # if", "**kwargs): request = self.request return Product.objects.featured() class ProductFeaturedDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.featured() template_name", "super(ProductDetailSlugView, self).get_context_data(*args, **kwargs) cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context", "Product.objects.all() template_name = \"products/detail.html\" class ProductDetailSlugView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\"", "raise Http404(\"Not Found..\") # except Product.MultipleObjectsReturned: # qs = Product.objects.get(slug=slug, active=True) # instance", "Http404(\"Not Found..\") # except Product.MultipleObjectsReturned: # qs = Product.objects.get(slug=slug, active=True) # instance =", "import Http404 class ProductFeaturedListView(ListView): template_name = \"products/list.html\" def get_queryset(self, *args, **kwargs): request =", "Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context # def get_object(self, *args, **kwargs): # request", "active=True) # instance = qs.first() # except: # raise Http404(\"Uhmm\") # return instance", "**kwargs): # request = self.request # slug = self.kwargs.get('slug') # try: # instance", "Product.objects.filter(pk=pk) # except Product.DoesNotExist: # print('no product here') # raise Http404(\"Product doesn't exist.\")", "exist.\") instance = Product.objects.get_by_id(pk) if instance is None: raise Http404(\"Product doesn't exist.\") context", "else: # raise Http404(\"Product doesn't exist.\") instance = Product.objects.get_by_id(pk) if instance is None:", "class ProductListView(ListView): queryset = Product.objects.all() template_name = \"products/list.html\" def get_context_data(self,*args, **kwargs): context =", "Product.objects.all() template_name = \"products/detail.html\" def get_context_data(self,*args, **kwargs): context = super(ProductDetailSlugView, self).get_context_data(*args, 
**kwargs) cart_obj", "Product from django.http import Http404 class ProductFeaturedListView(ListView): template_name = \"products/list.html\" def get_queryset(self, *args,", "# except Product.MultipleObjectsReturned: # qs = Product.objects.get(slug=slug, active=True) # instance = qs.first() #", "\"products/detail.html\" def get_context_data(self,*args, **kwargs): context = super(ProductDetailSlugView, self).get_context_data(*args, **kwargs) cart_obj , new_obj =", "Product.DoesNotExist: # raise Http404(\"Not Found..\") # except Product.MultipleObjectsReturned: # qs = Product.objects.get(slug=slug, active=True)", "ProductFeaturedListView(ListView): template_name = \"products/list.html\" def get_queryset(self, *args, **kwargs): request = self.request return Product.objects.featured()", "= super(ProductListView, self).get_context_data(*args, **kwargs) cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return", "queryset } return render(request, \"products/list.html\", context) class ProductDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name", "if qs.exists() and qs.count == 1: # instance = qs.first() # else: #", "class ProductDetailSlugView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\" def get_context_data(self,*args, **kwargs): context", "product_list_view(request): queryset = Product.objects.all() context = { 'object_list': queryset } return render(request, \"products/list.html\",", "# raise Http404(\"Product doesn't exist.\") # except: # print(\"Huh?\") # # qs =", "except: # raise Http404(\"Uhmm\") # return instance def product_detail_view(request, pk): # instance =", "# instance = Product.objects.filter(pk=pk) # except Product.DoesNotExist: # print('no product here') # raise", "# qs = Product.objects.filter(pk=pk) # if qs.exists() and qs.count == 1: # instance", "Http404(\"Product doesn't exist.\") instance = Product.objects.get_by_id(pk) if instance is None: raise Http404(\"Product doesn't", "import Cart from .models import Product from django.http import Http404 class ProductFeaturedListView(ListView): template_name", "Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context def product_list_view(request): queryset = Product.objects.all() context =", "self.kwargs.get('slug') # try: # instance = Product.objects.get(slug=slug, active=True) # except Product.DoesNotExist: # raise", "queryset = Product.objects.all() template_name = \"products/list.html\" def get_context_data(self,*args, **kwargs): context = super(ProductListView, self).get_context_data(*args,", "import ObjectViewedMixin from carts.models import Cart from .models import Product from django.http import", "render(request, \"products/list.html\", context) class ProductDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\" class", "def product_detail_view(request, pk): # instance = get_object_or_404(Product, pk=pk) # try: # instance =", "# slug = self.kwargs.get('slug') # try: # instance = Product.objects.get(slug=slug, active=True) # except", "cart_obj return context def product_list_view(request): queryset = Product.objects.all() context = { 'object_list': queryset", "return render(request, \"products/list.html\", context) class ProductDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\"", "except Product.MultipleObjectsReturned: # qs = 
Product.objects.get(slug=slug, active=True) # instance = qs.first() # except:", "ListView, DetailView from django.shortcuts import render, get_object_or_404 from analytics.mixins import ObjectViewedMixin from carts.models", "\"products/list.html\" def get_context_data(self,*args, **kwargs): context = super(ProductListView, self).get_context_data(*args, **kwargs) cart_obj , new_obj =", "# raise Http404(\"Product doesn't exist.\") instance = Product.objects.get_by_id(pk) if instance is None: raise", "queryset = Product.objects.all() template_name = \"products/detail.html\" class ProductDetailSlugView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name", "class ProductDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\" class ProductDetailSlugView(ObjectViewedMixin, DetailView): queryset", "**kwargs) cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context def product_list_view(request):", "class ProductFeaturedDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.featured() template_name = \"products/featured-detail.html\" class ProductListView(ListView): queryset =", "# instance = qs.first() # except: # raise Http404(\"Uhmm\") # return instance def", "django.views.generic import ListView, DetailView from django.shortcuts import render, get_object_or_404 from analytics.mixins import ObjectViewedMixin", "context = { 'object_list': queryset } return render(request, \"products/list.html\", context) class ProductDetailView(ObjectViewedMixin, DetailView):", "raise Http404(\"Uhmm\") # return instance def product_detail_view(request, pk): # instance = get_object_or_404(Product, pk=pk)", "instance = Product.objects.get(slug=slug, active=True) # except Product.DoesNotExist: # raise Http404(\"Not Found..\") # except", "instance = get_object_or_404(Product, pk=pk) # try: # instance = Product.objects.filter(pk=pk) # except Product.DoesNotExist:", "context['cart'] = cart_obj return context # def get_object(self, *args, **kwargs): # request =", "context) class ProductDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\" class ProductDetailSlugView(ObjectViewedMixin, DetailView):", "= \"products/list.html\" def get_queryset(self, *args, **kwargs): request = self.request return Product.objects.featured() class ProductFeaturedDetailView(ObjectViewedMixin,", "ProductDetailView(ObjectViewedMixin, DetailView): queryset = Product.objects.all() template_name = \"products/detail.html\" class ProductDetailSlugView(ObjectViewedMixin, DetailView): queryset =", "# except: # raise Http404(\"Uhmm\") # return instance def product_detail_view(request, pk): # instance", "raise Http404(\"Product doesn't exist.\") # except: # print(\"Huh?\") # # qs = Product.objects.filter(pk=pk)", "get_queryset(self, *args, **kwargs): request = self.request return Product.objects.featured() class ProductFeaturedDetailView(ObjectViewedMixin, DetailView): queryset =", "Http404 class ProductFeaturedListView(ListView): template_name = \"products/list.html\" def get_queryset(self, *args, **kwargs): request = self.request", "# def get_object(self, *args, **kwargs): # request = self.request # slug = self.kwargs.get('slug')", "= qs.first() # except: # raise Http404(\"Uhmm\") # return instance def product_detail_view(request, pk):", "return Product.objects.featured() class ProductFeaturedDetailView(ObjectViewedMixin, DetailView): queryset 
= Product.objects.featured() template_name = \"products/featured-detail.html\" class ProductListView(ListView):", "return instance def product_detail_view(request, pk): # instance = get_object_or_404(Product, pk=pk) # try: #", "instance = qs.first() # else: # raise Http404(\"Product doesn't exist.\") instance = Product.objects.get_by_id(pk)", "return context # def get_object(self, *args, **kwargs): # request = self.request # slug", "# # qs = Product.objects.filter(pk=pk) # if qs.exists() and qs.count == 1: #", "context['cart'] = cart_obj return context def product_list_view(request): queryset = Product.objects.all() context = {", "self.request # slug = self.kwargs.get('slug') # try: # instance = Product.objects.get(slug=slug, active=True) #", "Http404(\"Product doesn't exist.\") # except: # print(\"Huh?\") # # qs = Product.objects.filter(pk=pk) #", "template_name = \"products/featured-detail.html\" class ProductListView(ListView): queryset = Product.objects.all() template_name = \"products/list.html\" def get_context_data(self,*args,", "get_object_or_404(Product, pk=pk) # try: # instance = Product.objects.filter(pk=pk) # except Product.DoesNotExist: # print('no", "None: raise Http404(\"Product doesn't exist.\") context = { 'object': instance } return render(request,", "qs.first() # else: # raise Http404(\"Product doesn't exist.\") instance = Product.objects.get_by_id(pk) if instance", "qs = Product.objects.get(slug=slug, active=True) # instance = qs.first() # except: # raise Http404(\"Uhmm\")", "get_context_data(self,*args, **kwargs): context = super(ProductDetailSlugView, self).get_context_data(*args, **kwargs) cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart']", "request = self.request # slug = self.kwargs.get('slug') # try: # instance = Product.objects.get(slug=slug,", "instance def product_detail_view(request, pk): # instance = get_object_or_404(Product, pk=pk) # try: # instance", "from .models import Product from django.http import Http404 class ProductFeaturedListView(ListView): template_name = \"products/list.html\"", "= { 'object_list': queryset } return render(request, \"products/list.html\", context) class ProductDetailView(ObjectViewedMixin, DetailView): queryset", "from django.http import Http404 class ProductFeaturedListView(ListView): template_name = \"products/list.html\" def get_queryset(self, *args, **kwargs):", "super(ProductListView, self).get_context_data(*args, **kwargs) cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context", "= Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context # def get_object(self, *args, **kwargs): #", "= Product.objects.all() template_name = \"products/list.html\" def get_context_data(self,*args, **kwargs): context = super(ProductListView, self).get_context_data(*args, **kwargs)", "cart_obj , new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context # def get_object(self,", "= Product.objects.get_by_id(pk) if instance is None: raise Http404(\"Product doesn't exist.\") context = {", "= cart_obj return context def product_list_view(request): queryset = Product.objects.all() context = { 'object_list':", "Product.objects.get(slug=slug, active=True) # except Product.DoesNotExist: # raise Http404(\"Not Found..\") # except Product.MultipleObjectsReturned: #", "= Product.objects.get(slug=slug, active=True) # except Product.DoesNotExist: # raise Http404(\"Not Found..\") # except Product.MultipleObjectsReturned:", "DetailView): 
# views.py (products app)
from django.views.generic import ListView, DetailView
from django.shortcuts import render, get_object_or_404

from analytics.mixins import ObjectViewedMixin
from carts.models import Cart
from .models import Product
from django.http import Http404


class ProductFeaturedListView(ListView):
    template_name = "products/list.html"

    def get_queryset(self, *args, **kwargs):
        request = self.request
        return Product.objects.featured()


class ProductFeaturedDetailView(ObjectViewedMixin, DetailView):
    queryset = Product.objects.featured()
    template_name = "products/featured-detail.html"


class ProductListView(ListView):
    queryset = Product.objects.all()
    template_name = "products/list.html"

    def get_context_data(self, *args, **kwargs):
        context = super(ProductListView, self).get_context_data(*args, **kwargs)
        cart_obj, new_obj = Cart.objects.new_or_get(self.request)
        context['cart'] = cart_obj
        return context


def product_list_view(request):
    queryset = Product.objects.all()
    context = {
        'object_list': queryset
    }
    return render(request, "products/list.html", context)


class ProductDetailView(ObjectViewedMixin, DetailView):
    queryset = Product.objects.all()
    template_name = "products/detail.html"


class ProductDetailSlugView(ObjectViewedMixin, DetailView):
    queryset = Product.objects.all()
    template_name = "products/detail.html"

    def get_context_data(self, *args, **kwargs):
        context = super(ProductDetailSlugView, self).get_context_data(*args, **kwargs)
        cart_obj, new_obj = Cart.objects.new_or_get(self.request)
        context['cart'] = cart_obj
        return context

    # def get_object(self, *args, **kwargs):
    #     request = self.request
    #     slug = self.kwargs.get('slug')
    #     try:
    #         instance = Product.objects.get(slug=slug, active=True)
    #     except Product.DoesNotExist:
    #         raise Http404("Not Found..")
    #     except Product.MultipleObjectsReturned:
    #         qs = Product.objects.get(slug=slug, active=True)
    #         instance = qs.first()
    #     except:
    #         raise Http404("Uhmm")
    #     return instance


def product_detail_view(request, pk):
    # instance = get_object_or_404(Product, pk=pk)
    # try:
    #     instance = Product.objects.filter(pk=pk)
    # except Product.DoesNotExist:
    #     print('no product here')
    #     raise Http404("Product doesn't exist.")
    # except:
    #     print("Huh?")
    #
    # qs = Product.objects.filter(pk=pk)
    # if qs.exists() and qs.count == 1:
    #     instance = qs.first()
    # else:
    #     raise Http404("Product doesn't exist.")
    instance = Product.objects.get_by_id(pk)
    if instance is None:
        raise Http404("Product doesn't exist.")
    context = {
        'object': instance
    }
    return render(request, "products/detail.html", context)
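The views above lean on two custom manager calls that are not defined in this file: Product.objects.featured() and Product.objects.get_by_id(pk). A minimal sketch of a manager that would satisfy them, assuming the Product model has boolean active and featured fields (both field names are assumptions, not taken from this file):

from django.db import models

class ProductQuerySet(models.QuerySet):
    def active(self):
        return self.filter(active=True)

    def featured(self):
        return self.filter(featured=True, active=True)

class ProductManager(models.Manager):
    def get_queryset(self):
        return ProductQuerySet(self.model, using=self._db)

    def featured(self):
        return self.get_queryset().featured()

    def get_by_id(self, id):
        # Return exactly one Product or None, so the caller decides on Http404.
        qs = self.get_queryset().filter(id=id)
        if qs.count() == 1:
            return qs.first()
        return None

Returning None from get_by_id instead of raising is what lets product_detail_view perform its own `if instance is None: raise Http404(...)` check.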
[ "file_ref_spec.basedOn.identifier; else: language = file_ref_spec.contents['GccDialectName']; found_dialect = True; else: break; if found_dialect ==", "= file.name.split('.')[0]; args = (); # add base (compiler) args += self.properties['baseargs']; sdk_name", "if result != None and len(result) > 0: args += (result,); file_path =", "= os.path.join(output_dir, object_file); # writing the object file path to the linkfilelist print", "('-x', language); else: # should this be an error? logging_helper.getLogger().warn('[clangcompiler]: unknown language used:", "this is displaying the command being issued for this compiler in the build", "product_name = build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1]; output_dir = build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir) link_file_list = os.path.join(output_dir, product_name+'.LinkFileList')", "result = envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings); if result != None and len(result) > 0: args", "!= None and len(result) > 0: args += (result,); file_path = str(file.fileRef.fs_path.root_path); args", "skip_file = True; for allowed_file_type_for_var in envar.FileTypes: if allowed_file_type_for_var in file_types: skip_file =", "file_name = file.name.split('.')[0]; args = (); # add base (compiler) args += self.properties['baseargs'];", "= True; for allowed_file_type_for_var in envar.FileTypes: if allowed_file_type_for_var in file_types: skip_file = False;", "self.properties['arch']); # add diag args += ('',); # add output object_file = file_name", "True; for allowed_file_type_for_var in envar.FileTypes: if allowed_file_type_for_var in file_types: skip_file = False; break;", "for allowed_file_type_for_var in envar.FileTypes: if allowed_file_type_for_var in file_types: skip_file = False; break; if", "envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings); if result != None and len(result) > 0: args += (result,);", "None: if 'GccDialectName' not in file_ref_spec.contents.keys(): identifier = file_ref_spec.basedOn.identifier; else: language = file_ref_spec.contents['GccDialectName'];", "add arch # args += ('-arch', self.properties['arch']); # add diag args += ('',);", "and len(result) > 0: args += (result,); file_path = str(file.fileRef.fs_path.root_path); args += ('-c',", "in args: flag = str(word) if flag != '\\'\\'': args_str += flag args_str", "+= ('-isysroot', sdk_path); # this is missing all the build settings, also needs", "config_dict); def build(self): build_system = self.properties['buildsystem']; arch = self.properties['arch']; product_name = build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1];", "is displaying the command being issued for this compiler in the build phase", "+= flag args_str += ' '; print '\\t'+args_str; print ''; # this is", "hasattr(envar, 'FileTypes'): file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype); file_types = file_ref_spec.inheritedTypes(); skip_file = True; for allowed_file_type_for_var", "something in the xcspec args += ('-isysroot', sdk_path); # this is missing all", "compiler in the build phase args_str = ''; for word in args: flag", "output set resolved_settings = build_system.environment.resolvedValues(); environment_variables_has_flags = filter(lambda envar: envar.hasCommandLineArgs() == True, resolved_settings.values());", "object_file = file_name + '.o'; output_file_path = os.path.join(output_dir, object_file); # writing the 
object", "file_ref_spec.inheritedTypes(); skip_file = True; for allowed_file_type_for_var in envar.FileTypes: if allowed_file_type_for_var in file_types: skip_file", "args += ('-isysroot', sdk_path); # this is missing all the build settings, also", "language used: \"%s\"' % (language)); # I think this should be handled by", "if allowed_file_type_for_var in file_types: skip_file = False; break; if skip_file == True: continue;", "= file_ref_spec.inheritedTypes(); skip_file = True; for allowed_file_type_for_var in envar.FileTypes: if allowed_file_type_for_var in file_types:", "in environment_variables_has_flags: if envar.satisfiesCondition(build_system.environment, resolved_settings) == True: if hasattr(envar, 'FileTypes'): file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype);", "output_dir = build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir) link_file_list = os.path.join(output_dir, product_name+'.LinkFileList') link_file_list_fd = open(link_file_list, 'w');", "base (compiler) args += self.properties['baseargs']; sdk_name = build_system.environment.valueForKey('SDKROOT'); sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path'));", "args_str = ''; for word in args: flag = str(word) if flag !=", "arch # args += ('-arch', self.properties['arch']); # add diag args += ('',); #", "if 'GccDialectName' not in file_ref_spec.contents.keys(): identifier = file_ref_spec.basedOn.identifier; else: language = file_ref_spec.contents['GccDialectName']; found_dialect", "print >> link_file_list_fd, output_file_path; args += ('-o', output_file_path) # this is displaying the", "False; identifier = file.fileRef.ftype; language = ''; while found_dialect == False: file_ref_spec =", "'\\'\\'': args_str += flag args_str += ' '; print '\\t'+args_str; print ''; #", "by something in the xcspec args += ('-isysroot', sdk_path); # this is missing", "= file_ref_spec.contents['GccDialectName']; found_dialect = True; else: break; if found_dialect == True: args +=", "= (); # add base (compiler) args += self.properties['baseargs']; sdk_name = build_system.environment.valueForKey('SDKROOT'); sdk_path", "= str(word) if flag != '\\'\\'': args_str += flag args_str += ' ';", "path to the linkfilelist print >> link_file_list_fd, output_file_path; args += ('-o', output_file_path) #", "continue; result = envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings); if result != None and len(result) > 0:", "True: args += ('-x', language); else: # should this be an error? 
logging_helper.getLogger().warn('[clangcompiler]:", "filter(lambda envar: envar.hasCommandLineArgs() == True, resolved_settings.values()); for envar in environment_variables_has_flags: if envar.satisfiesCondition(build_system.environment, resolved_settings)", "envar.satisfiesCondition(build_system.environment, resolved_settings) == True: if hasattr(envar, 'FileTypes'): file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype); file_types = file_ref_spec.inheritedTypes();", "phase args_str = ''; for word in args: flag = str(word) if flag", "found_dialect = False; identifier = file.fileRef.ftype; language = ''; while found_dialect == False:", "file_types: skip_file = False; break; if skip_file == True: continue; result = envar.commandLineFlag(build_system.environment,", "resolved_settings) == True: if hasattr(envar, 'FileTypes'): file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype); file_types = file_ref_spec.inheritedTypes(); skip_file", "build_system = self.properties['buildsystem']; arch = self.properties['arch']; product_name = build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1]; output_dir = build_system.environment.parseKey(None,", "sdk_name, '--show-sdk-path')); # add language dialect found_dialect = False; identifier = file.fileRef.ftype; language", "object_file); # writing the object file path to the linkfilelist print >> link_file_list_fd,", "while found_dialect == False: file_ref_spec = build_system.getSpecForIdentifier(identifier); if file_ref_spec != None: if 'GccDialectName'", "__init__(self, compiler, config_dict): super(clangcompiler, self).__init__(compiler, config_dict); def build(self): build_system = self.properties['buildsystem']; arch =", "...Helpers import logging_helper from ...Helpers import xcrun_helper from ...Helpers import path_helper class clangcompiler(xccompiler):", "build_system.getSpecForIdentifier(identifier); if file_ref_spec != None: if 'GccDialectName' not in file_ref_spec.contents.keys(): identifier = file_ref_spec.basedOn.identifier;", "(language)); # I think this should be handled by something in the xcspec", "'w'); for file in self.properties['files']: file_name = file.name.split('.')[0]; args = (); # add", "output_file_path = os.path.join(output_dir, object_file); # writing the object file path to the linkfilelist", "path_helper.create_directories(output_dir) link_file_list = os.path.join(output_dir, product_name+'.LinkFileList') link_file_list_fd = open(link_file_list, 'w'); for file in self.properties['files']:", "(); # add base (compiler) args += self.properties['baseargs']; sdk_name = build_system.environment.valueForKey('SDKROOT'); sdk_path =", "language); else: # should this be an error? 
logging_helper.getLogger().warn('[clangcompiler]: unknown language used: \"%s\"'", "= False; identifier = file.fileRef.ftype; language = ''; while found_dialect == False: file_ref_spec", "# # add arch # args += ('-arch', self.properties['arch']); # add diag args", "# add base (compiler) args += self.properties['baseargs']; sdk_name = build_system.environment.valueForKey('SDKROOT'); sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk',", "+= ('',); # add output object_file = file_name + '.o'; output_file_path = os.path.join(output_dir,", "all the build settings, also needs output set resolved_settings = build_system.environment.resolvedValues(); environment_variables_has_flags =", "sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path')); # add language dialect found_dialect = False; identifier", "len(result) > 0: args += (result,); file_path = str(file.fileRef.fs_path.root_path); args += ('-c', file_path);", "identifier = file.fileRef.ftype; language = ''; while found_dialect == False: file_ref_spec = build_system.getSpecForIdentifier(identifier);", "path_helper class clangcompiler(xccompiler): def __init__(self, compiler, config_dict): super(clangcompiler, self).__init__(compiler, config_dict); def build(self): build_system", "else: language = file_ref_spec.contents['GccDialectName']; found_dialect = True; else: break; if found_dialect == True:", "resolved_settings = build_system.environment.resolvedValues(); environment_variables_has_flags = filter(lambda envar: envar.hasCommandLineArgs() == True, resolved_settings.values()); for envar", "# args += ('-arch', self.properties['arch']); # add diag args += ('',); # add", "envar.hasCommandLineArgs() == True, resolved_settings.values()); for envar in environment_variables_has_flags: if envar.satisfiesCondition(build_system.environment, resolved_settings) == True:", "should this be an error? 
logging_helper.getLogger().warn('[clangcompiler]: unknown language used: \"%s\"' % (language)); #", "# this is missing all the build settings, also needs output set resolved_settings", "(result,); file_path = str(file.fileRef.fs_path.root_path); args += ('-c', file_path); # # add arch #", "True: continue; result = envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings); if result != None and len(result) >", "True: if hasattr(envar, 'FileTypes'): file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype); file_types = file_ref_spec.inheritedTypes(); skip_file = True;", "link_file_list_fd = open(link_file_list, 'w'); for file in self.properties['files']: file_name = file.name.split('.')[0]; args =", "command being issued for this compiler in the build phase args_str = '';", "\"%s\"' % (language)); # I think this should be handled by something in", "args += ('-o', output_file_path) # this is displaying the command being issued for", "language dialect found_dialect = False; identifier = file.fileRef.ftype; language = ''; while found_dialect", "is missing all the build settings, also needs output set resolved_settings = build_system.environment.resolvedValues();", "# writing the object file path to the linkfilelist print >> link_file_list_fd, output_file_path;", "clangcompiler(xccompiler): def __init__(self, compiler, config_dict): super(clangcompiler, self).__init__(compiler, config_dict); def build(self): build_system = self.properties['buildsystem'];", "link_file_list = os.path.join(output_dir, product_name+'.LinkFileList') link_file_list_fd = open(link_file_list, 'w'); for file in self.properties['files']: file_name", "linkfilelist print >> link_file_list_fd, output_file_path; args += ('-o', output_file_path) # this is displaying", "the command being issued for this compiler in the build phase args_str =", "is running the compiler command # compiler_output = xcrun_helper.make_subprocess_call(args); # if compiler_output[1] !=", "* from ...Helpers import logging_helper from ...Helpers import xcrun_helper from ...Helpers import path_helper", "+= ('-c', file_path); # # add arch # args += ('-arch', self.properties['arch']); #", "the xcspec args += ('-isysroot', sdk_path); # this is missing all the build", "environment_variables_has_flags: if envar.satisfiesCondition(build_system.environment, resolved_settings) == True: if hasattr(envar, 'FileTypes'): file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype); file_types", "+= ('-o', output_file_path) # this is displaying the command being issued for this", "skip_file = False; break; if skip_file == True: continue; result = envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings);", "False; break; if skip_file == True: continue; result = envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings); if result", "('-arch', self.properties['arch']); # add diag args += ('',); # add output object_file =", "output_file_path; args += ('-o', output_file_path) # this is displaying the command being issued", "('-isysroot', sdk_path); # this is missing all the build settings, also needs output", "missing all the build settings, also needs output set resolved_settings = build_system.environment.resolvedValues(); environment_variables_has_flags", "'$(PRODUCT_NAME)')[1]; output_dir = build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir) link_file_list = os.path.join(output_dir, 
product_name+'.LinkFileList') link_file_list_fd = open(link_file_list,", "an error? logging_helper.getLogger().warn('[clangcompiler]: unknown language used: \"%s\"' % (language)); # I think this", "args_str += ' '; print '\\t'+args_str; print ''; # this is running the", "str(file.fileRef.fs_path.root_path); args += ('-c', file_path); # # add arch # args += ('-arch',", "''; # this is running the compiler command # compiler_output = xcrun_helper.make_subprocess_call(args); #", "== True: args += ('-x', language); else: # should this be an error?", "be handled by something in the xcspec args += ('-isysroot', sdk_path); # this", "'--show-sdk-path')); # add language dialect found_dialect = False; identifier = file.fileRef.ftype; language =", "logging_helper from ...Helpers import xcrun_helper from ...Helpers import path_helper class clangcompiler(xccompiler): def __init__(self,", "+ '.o'; output_file_path = os.path.join(output_dir, object_file); # writing the object file path to", "found_dialect == False: file_ref_spec = build_system.getSpecForIdentifier(identifier); if file_ref_spec != None: if 'GccDialectName' not", "= build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1]; output_dir = build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir) link_file_list = os.path.join(output_dir, product_name+'.LinkFileList') link_file_list_fd", "# add output object_file = file_name + '.o'; output_file_path = os.path.join(output_dir, object_file); #", "+= ('-arch', self.properties['arch']); # add diag args += ('',); # add output object_file", "flag args_str += ' '; print '\\t'+args_str; print ''; # this is running", "== True: continue; result = envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings); if result != None and len(result)", "add base (compiler) args += self.properties['baseargs']; sdk_name = build_system.environment.valueForKey('SDKROOT'); sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name,", "'.o'; output_file_path = os.path.join(output_dir, object_file); # writing the object file path to the", "def __init__(self, compiler, config_dict): super(clangcompiler, self).__init__(compiler, config_dict); def build(self): build_system = self.properties['buildsystem']; arch", "# compiler_output = xcrun_helper.make_subprocess_call(args); # if compiler_output[1] != 0: # logging_helper.getLogger().error('[xcbuildsystem]: Compiler error", "= open(link_file_list, 'w'); for file in self.properties['files']: file_name = file.name.split('.')[0]; args = ();", "args: flag = str(word) if flag != '\\'\\'': args_str += flag args_str +=", "import * from ...Helpers import logging_helper from ...Helpers import xcrun_helper from ...Helpers import", "+= ('-x', language); else: # should this be an error? 
logging_helper.getLogger().warn('[clangcompiler]: unknown language", "== False: file_ref_spec = build_system.getSpecForIdentifier(identifier); if file_ref_spec != None: if 'GccDialectName' not in", "''; for word in args: flag = str(word) if flag != '\\'\\'': args_str", "this compiler in the build phase args_str = ''; for word in args:", "add output object_file = file_name + '.o'; output_file_path = os.path.join(output_dir, object_file); # writing", "used: \"%s\"' % (language)); # I think this should be handled by something", "found_dialect == True: args += ('-x', language); else: # should this be an", "this should be handled by something in the xcspec args += ('-isysroot', sdk_path);", "running the compiler command # compiler_output = xcrun_helper.make_subprocess_call(args); # if compiler_output[1] != 0:", "...Helpers import path_helper class clangcompiler(xccompiler): def __init__(self, compiler, config_dict): super(clangcompiler, self).__init__(compiler, config_dict); def", "settings, also needs output set resolved_settings = build_system.environment.resolvedValues(); environment_variables_has_flags = filter(lambda envar: envar.hasCommandLineArgs()", "file_ref_spec = build_system.getSpecForIdentifier(identifier); if file_ref_spec != None: if 'GccDialectName' not in file_ref_spec.contents.keys(): identifier", "being issued for this compiler in the build phase args_str = ''; for", "else: # should this be an error? logging_helper.getLogger().warn('[clangcompiler]: unknown language used: \"%s\"' %", "from ...Helpers import logging_helper from ...Helpers import xcrun_helper from ...Helpers import path_helper class", "from .xccompiler import * from ...Helpers import logging_helper from ...Helpers import xcrun_helper from", "= xcrun_helper.make_subprocess_call(args); # if compiler_output[1] != 0: # logging_helper.getLogger().error('[xcbuildsystem]: Compiler error %s' %", "needs output set resolved_settings = build_system.environment.resolvedValues(); environment_variables_has_flags = filter(lambda envar: envar.hasCommandLineArgs() == True,", "identifier = file_ref_spec.basedOn.identifier; else: language = file_ref_spec.contents['GccDialectName']; found_dialect = True; else: break; if", "args = (); # add base (compiler) args += self.properties['baseargs']; sdk_name = build_system.environment.valueForKey('SDKROOT');", "else: break; if found_dialect == True: args += ('-x', language); else: # should", "xcrun_helper from ...Helpers import path_helper class clangcompiler(xccompiler): def __init__(self, compiler, config_dict): super(clangcompiler, self).__init__(compiler,", "displaying the command being issued for this compiler in the build phase args_str", "file in self.properties['files']: file_name = file.name.split('.')[0]; args = (); # add base (compiler)", "the linkfilelist print >> link_file_list_fd, output_file_path; args += ('-o', output_file_path) # this is", "set resolved_settings = build_system.environment.resolvedValues(); environment_variables_has_flags = filter(lambda envar: envar.hasCommandLineArgs() == True, resolved_settings.values()); for", "= True; else: break; if found_dialect == True: args += ('-x', language); else:", "self.properties['buildsystem']; arch = self.properties['arch']; product_name = build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1]; output_dir = build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir)", "result != None and len(result) > 0: args += (result,); file_path 
= str(file.fileRef.fs_path.root_path);", "if skip_file == True: continue; result = envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings); if result != None", "super(clangcompiler, self).__init__(compiler, config_dict); def build(self): build_system = self.properties['buildsystem']; arch = self.properties['arch']; product_name =", "in envar.FileTypes: if allowed_file_type_for_var in file_types: skip_file = False; break; if skip_file ==", "build_system.environment.valueForKey('SDKROOT'); sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path')); # add language dialect found_dialect = False;", "+= (result,); file_path = str(file.fileRef.fs_path.root_path); args += ('-c', file_path); # # add arch", "args += ('',); # add output object_file = file_name + '.o'; output_file_path =", "args += ('-x', language); else: # should this be an error? logging_helper.getLogger().warn('[clangcompiler]: unknown", "None and len(result) > 0: args += (result,); file_path = str(file.fileRef.fs_path.root_path); args +=", "open(link_file_list, 'w'); for file in self.properties['files']: file_name = file.name.split('.')[0]; args = (); #", "if found_dialect == True: args += ('-x', language); else: # should this be", "= xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path')); # add language dialect found_dialect = False; identifier =", "'$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir) link_file_list = os.path.join(output_dir, product_name+'.LinkFileList') link_file_list_fd = open(link_file_list, 'w'); for file in", "('-o', output_file_path) # this is displaying the command being issued for this compiler", "envar in environment_variables_has_flags: if envar.satisfiesCondition(build_system.environment, resolved_settings) == True: if hasattr(envar, 'FileTypes'): file_ref_spec =", "in file_types: skip_file = False; break; if skip_file == True: continue; result =", "0: args += (result,); file_path = str(file.fileRef.fs_path.root_path); args += ('-c', file_path); # #", "resolved_settings.values()); for envar in environment_variables_has_flags: if envar.satisfiesCondition(build_system.environment, resolved_settings) == True: if hasattr(envar, 'FileTypes'):", "= ''; for word in args: flag = str(word) if flag != '\\'\\'':", "if flag != '\\'\\'': args_str += flag args_str += ' '; print '\\t'+args_str;", "language = ''; while found_dialect == False: file_ref_spec = build_system.getSpecForIdentifier(identifier); if file_ref_spec !=", "file_ref_spec.contents['GccDialectName']; found_dialect = True; else: break; if found_dialect == True: args += ('-x',", "build(self): build_system = self.properties['buildsystem']; arch = self.properties['arch']; product_name = build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1]; output_dir =", "str(word) if flag != '\\'\\'': args_str += flag args_str += ' '; print", "I think this should be handled by something in the xcspec args +=", "= build_system.environment.valueForKey('SDKROOT'); sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path')); # add language dialect found_dialect =", "self.properties['arch']; product_name = build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1]; output_dir = build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir) link_file_list = os.path.join(output_dir,", "# add diag args += ('',); # add output object_file = 
file_name +", "file_name + '.o'; output_file_path = os.path.join(output_dir, object_file); # writing the object file path", "'; print '\\t'+args_str; print ''; # this is running the compiler command #", "True, resolved_settings.values()); for envar in environment_variables_has_flags: if envar.satisfiesCondition(build_system.environment, resolved_settings) == True: if hasattr(envar,", "os.path.join(output_dir, object_file); # writing the object file path to the linkfilelist print >>", "in the xcspec args += ('-isysroot', sdk_path); # this is missing all the", "the object file path to the linkfilelist print >> link_file_list_fd, output_file_path; args +=", "print '\\t'+args_str; print ''; # this is running the compiler command # compiler_output", "file_ref_spec.contents.keys(): identifier = file_ref_spec.basedOn.identifier; else: language = file_ref_spec.contents['GccDialectName']; found_dialect = True; else: break;", "import path_helper class clangcompiler(xccompiler): def __init__(self, compiler, config_dict): super(clangcompiler, self).__init__(compiler, config_dict); def build(self):", ">> link_file_list_fd, output_file_path; args += ('-o', output_file_path) # this is displaying the command", "print ''; # this is running the compiler command # compiler_output = xcrun_helper.make_subprocess_call(args);", "envar.FileTypes: if allowed_file_type_for_var in file_types: skip_file = False; break; if skip_file == True:", "file_types = file_ref_spec.inheritedTypes(); skip_file = True; for allowed_file_type_for_var in envar.FileTypes: if allowed_file_type_for_var in", "= False; break; if skip_file == True: continue; result = envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings); if", "''; while found_dialect == False: file_ref_spec = build_system.getSpecForIdentifier(identifier); if file_ref_spec != None: if", "+= self.properties['baseargs']; sdk_name = build_system.environment.valueForKey('SDKROOT'); sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path')); # add language", "'FileTypes'): file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype); file_types = file_ref_spec.inheritedTypes(); skip_file = True; for allowed_file_type_for_var in", "> 0: args += (result,); file_path = str(file.fileRef.fs_path.root_path); args += ('-c', file_path); #", "allowed_file_type_for_var in file_types: skip_file = False; break; if skip_file == True: continue; result", "= ''; while found_dialect == False: file_ref_spec = build_system.getSpecForIdentifier(identifier); if file_ref_spec != None:", "if file_ref_spec != None: if 'GccDialectName' not in file_ref_spec.contents.keys(): identifier = file_ref_spec.basedOn.identifier; else:", "xcspec args += ('-isysroot', sdk_path); # this is missing all the build settings,", "self.properties['files']: file_name = file.name.split('.')[0]; args = (); # add base (compiler) args +=", "# should this be an error? 
logging_helper.getLogger().warn('[clangcompiler]: unknown language used: \"%s\"' % (language));", "xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path')); # add language dialect found_dialect = False; identifier = file.fileRef.ftype;", "= file.fileRef.ftype; language = ''; while found_dialect == False: file_ref_spec = build_system.getSpecForIdentifier(identifier); if", "== True, resolved_settings.values()); for envar in environment_variables_has_flags: if envar.satisfiesCondition(build_system.environment, resolved_settings) == True: if", "file path to the linkfilelist print >> link_file_list_fd, output_file_path; args += ('-o', output_file_path)", "for word in args: flag = str(word) if flag != '\\'\\'': args_str +=", "build settings, also needs output set resolved_settings = build_system.environment.resolvedValues(); environment_variables_has_flags = filter(lambda envar:", "this is running the compiler command # compiler_output = xcrun_helper.make_subprocess_call(args); # if compiler_output[1]", "sdk_path); # this is missing all the build settings, also needs output set", "should be handled by something in the xcspec args += ('-isysroot', sdk_path); #", "word in args: flag = str(word) if flag != '\\'\\'': args_str += flag", "os.path.join(output_dir, product_name+'.LinkFileList') link_file_list_fd = open(link_file_list, 'w'); for file in self.properties['files']: file_name = file.name.split('.')[0];", "if envar.satisfiesCondition(build_system.environment, resolved_settings) == True: if hasattr(envar, 'FileTypes'): file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype); file_types =", "= file_name + '.o'; output_file_path = os.path.join(output_dir, object_file); # writing the object file", "# add language dialect found_dialect = False; identifier = file.fileRef.ftype; language = '';", "handled by something in the xcspec args += ('-isysroot', sdk_path); # this is", "self.properties['baseargs']; sdk_name = build_system.environment.valueForKey('SDKROOT'); sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path')); # add language dialect", "this is missing all the build settings, also needs output set resolved_settings =", "% (language)); # I think this should be handled by something in the", "True; else: break; if found_dialect == True: args += ('-x', language); else: #", "# I think this should be handled by something in the xcspec args", "= envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings); if result != None and len(result) > 0: args +=", "compiler_output = xcrun_helper.make_subprocess_call(args); # if compiler_output[1] != 0: # logging_helper.getLogger().error('[xcbuildsystem]: Compiler error %s'", "!= '\\'\\'': args_str += flag args_str += ' '; print '\\t'+args_str; print '';", "object file path to the linkfilelist print >> link_file_list_fd, output_file_path; args += ('-o',", "lookup_dict=resolved_settings); if result != None and len(result) > 0: args += (result,); file_path", "this be an error? 
logging_helper.getLogger().warn('[clangcompiler]: unknown language used: \"%s\"' % (language)); # I", "add language dialect found_dialect = False; identifier = file.fileRef.ftype; language = ''; while", "('-c', file_path); # # add arch # args += ('-arch', self.properties['arch']); # add", "'GccDialectName' not in file_ref_spec.contents.keys(): identifier = file_ref_spec.basedOn.identifier; else: language = file_ref_spec.contents['GccDialectName']; found_dialect =", "args += (result,); file_path = str(file.fileRef.fs_path.root_path); args += ('-c', file_path); # # add", "build_system.getSpecForIdentifier(file.fileRef.ftype); file_types = file_ref_spec.inheritedTypes(); skip_file = True; for allowed_file_type_for_var in envar.FileTypes: if allowed_file_type_for_var", "file.fileRef.ftype; language = ''; while found_dialect == False: file_ref_spec = build_system.getSpecForIdentifier(identifier); if file_ref_spec", "!= None: if 'GccDialectName' not in file_ref_spec.contents.keys(): identifier = file_ref_spec.basedOn.identifier; else: language =", "the build settings, also needs output set resolved_settings = build_system.environment.resolvedValues(); environment_variables_has_flags = filter(lambda", "('',); # add output object_file = file_name + '.o'; output_file_path = os.path.join(output_dir, object_file);", "environment_variables_has_flags = filter(lambda envar: envar.hasCommandLineArgs() == True, resolved_settings.values()); for envar in environment_variables_has_flags: if", "add diag args += ('',); # add output object_file = file_name + '.o';", "dialect found_dialect = False; identifier = file.fileRef.ftype; language = ''; while found_dialect ==", "in the build phase args_str = ''; for word in args: flag =", "in self.properties['files']: file_name = file.name.split('.')[0]; args = (); # add base (compiler) args", "the compiler command # compiler_output = xcrun_helper.make_subprocess_call(args); # if compiler_output[1] != 0: #", "...Helpers import xcrun_helper from ...Helpers import path_helper class clangcompiler(xccompiler): def __init__(self, compiler, config_dict):", "import logging_helper from ...Helpers import xcrun_helper from ...Helpers import path_helper class clangcompiler(xccompiler): def", "def build(self): build_system = self.properties['buildsystem']; arch = self.properties['arch']; product_name = build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1]; output_dir", "issued for this compiler in the build phase args_str = ''; for word", "also needs output set resolved_settings = build_system.environment.resolvedValues(); environment_variables_has_flags = filter(lambda envar: envar.hasCommandLineArgs() ==", "args += ('-c', file_path); # # add arch # args += ('-arch', self.properties['arch']);", "product_name+'.LinkFileList') link_file_list_fd = open(link_file_list, 'w'); for file in self.properties['files']: file_name = file.name.split('.')[0]; args", "command # compiler_output = xcrun_helper.make_subprocess_call(args); # if compiler_output[1] != 0: # logging_helper.getLogger().error('[xcbuildsystem]: Compiler", "build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1]; output_dir = build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir) link_file_list = os.path.join(output_dir, product_name+'.LinkFileList') link_file_list_fd =", "logging_helper.getLogger().warn('[clangcompiler]: unknown language used: \"%s\"' % (language)); # I think this should be", "= 
build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir) link_file_list = os.path.join(output_dir, product_name+'.LinkFileList') link_file_list_fd = open(link_file_list, 'w'); for", "compiler, config_dict): super(clangcompiler, self).__init__(compiler, config_dict); def build(self): build_system = self.properties['buildsystem']; arch = self.properties['arch'];", "in file_ref_spec.contents.keys(): identifier = file_ref_spec.basedOn.identifier; else: language = file_ref_spec.contents['GccDialectName']; found_dialect = True; else:", "'\\t'+args_str; print ''; # this is running the compiler command # compiler_output =", "file.name.split('.')[0]; args = (); # add base (compiler) args += self.properties['baseargs']; sdk_name =", "for this compiler in the build phase args_str = ''; for word in", "os from .xccompiler import * from ...Helpers import logging_helper from ...Helpers import xcrun_helper", "build_system.environment.resolvedValues(); environment_variables_has_flags = filter(lambda envar: envar.hasCommandLineArgs() == True, resolved_settings.values()); for envar in environment_variables_has_flags:", "args += self.properties['baseargs']; sdk_name = build_system.environment.valueForKey('SDKROOT'); sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path')); # add", "build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir) link_file_list = os.path.join(output_dir, product_name+'.LinkFileList') link_file_list_fd = open(link_file_list, 'w'); for file", "error? logging_helper.getLogger().warn('[clangcompiler]: unknown language used: \"%s\"' % (language)); # I think this should", "build phase args_str = ''; for word in args: flag = str(word) if", "import xcrun_helper from ...Helpers import path_helper class clangcompiler(xccompiler): def __init__(self, compiler, config_dict): super(clangcompiler,", "for file in self.properties['files']: file_name = file.name.split('.')[0]; args = (); # add base", "= build_system.getSpecForIdentifier(file.fileRef.ftype); file_types = file_ref_spec.inheritedTypes(); skip_file = True; for allowed_file_type_for_var in envar.FileTypes: if", "file_path); # # add arch # args += ('-arch', self.properties['arch']); # add diag", "break; if skip_file == True: continue; result = envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings); if result !=", "False: file_ref_spec = build_system.getSpecForIdentifier(identifier); if file_ref_spec != None: if 'GccDialectName' not in file_ref_spec.contents.keys():", "# this is running the compiler command # compiler_output = xcrun_helper.make_subprocess_call(args); # if", "file_path = str(file.fileRef.fs_path.root_path); args += ('-c', file_path); # # add arch # args", "' '; print '\\t'+args_str; print ''; # this is running the compiler command", "= self.properties['arch']; product_name = build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1]; output_dir = build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir) link_file_list =", "self).__init__(compiler, config_dict); def build(self): build_system = self.properties['buildsystem']; arch = self.properties['arch']; product_name = build_system.environment.parseKey(None,", "# this is displaying the command being issued for this compiler in the", "be an error? 
logging_helper.getLogger().warn('[clangcompiler]: unknown language used: \"%s\"' % (language)); # I think", "= os.path.join(output_dir, product_name+'.LinkFileList') link_file_list_fd = open(link_file_list, 'w'); for file in self.properties['files']: file_name =", "file_ref_spec != None: if 'GccDialectName' not in file_ref_spec.contents.keys(): identifier = file_ref_spec.basedOn.identifier; else: language", "envar: envar.hasCommandLineArgs() == True, resolved_settings.values()); for envar in environment_variables_has_flags: if envar.satisfiesCondition(build_system.environment, resolved_settings) ==", ".xccompiler import * from ...Helpers import logging_helper from ...Helpers import xcrun_helper from ...Helpers", "class clangcompiler(xccompiler): def __init__(self, compiler, config_dict): super(clangcompiler, self).__init__(compiler, config_dict); def build(self): build_system =", "break; if found_dialect == True: args += ('-x', language); else: # should this", "config_dict): super(clangcompiler, self).__init__(compiler, config_dict); def build(self): build_system = self.properties['buildsystem']; arch = self.properties['arch']; product_name", "= str(file.fileRef.fs_path.root_path); args += ('-c', file_path); # # add arch # args +=", "allowed_file_type_for_var in envar.FileTypes: if allowed_file_type_for_var in file_types: skip_file = False; break; if skip_file", "diag args += ('',); # add output object_file = file_name + '.o'; output_file_path", "sdk_name = build_system.environment.valueForKey('SDKROOT'); sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path')); # add language dialect found_dialect", "not in file_ref_spec.contents.keys(): identifier = file_ref_spec.basedOn.identifier; else: language = file_ref_spec.contents['GccDialectName']; found_dialect = True;", "arch = self.properties['arch']; product_name = build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1]; output_dir = build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1]; path_helper.create_directories(output_dir) link_file_list", "language = file_ref_spec.contents['GccDialectName']; found_dialect = True; else: break; if found_dialect == True: args", "xcrun_helper.make_subprocess_call(args); # if compiler_output[1] != 0: # logging_helper.getLogger().error('[xcbuildsystem]: Compiler error %s' % compiler_output[0]);", "= file_ref_spec.basedOn.identifier; else: language = file_ref_spec.contents['GccDialectName']; found_dialect = True; else: break; if found_dialect", "for envar in environment_variables_has_flags: if envar.satisfiesCondition(build_system.environment, resolved_settings) == True: if hasattr(envar, 'FileTypes'): file_ref_spec", "= self.properties['buildsystem']; arch = self.properties['arch']; product_name = build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1]; output_dir = build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1];", "= build_system.getSpecForIdentifier(identifier); if file_ref_spec != None: if 'GccDialectName' not in file_ref_spec.contents.keys(): identifier =", "from ...Helpers import xcrun_helper from ...Helpers import path_helper class clangcompiler(xccompiler): def __init__(self, compiler,", "file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype); file_types = file_ref_spec.inheritedTypes(); skip_file = True; for allowed_file_type_for_var in envar.FileTypes:", "link_file_list_fd, output_file_path; args += ('-o', output_file_path) # this is displaying 
the command being", "flag = str(word) if flag != '\\'\\'': args_str += flag args_str += '", "writing the object file path to the linkfilelist print >> link_file_list_fd, output_file_path; args", "= build_system.environment.resolvedValues(); environment_variables_has_flags = filter(lambda envar: envar.hasCommandLineArgs() == True, resolved_settings.values()); for envar in", "# add arch # args += ('-arch', self.properties['arch']); # add diag args +=", "args_str += flag args_str += ' '; print '\\t'+args_str; print ''; # this", "(compiler) args += self.properties['baseargs']; sdk_name = build_system.environment.valueForKey('SDKROOT'); sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path')); #", "= filter(lambda envar: envar.hasCommandLineArgs() == True, resolved_settings.values()); for envar in environment_variables_has_flags: if envar.satisfiesCondition(build_system.environment,", "think this should be handled by something in the xcspec args += ('-isysroot',", "args += ('-arch', self.properties['arch']); # add diag args += ('',); # add output", "found_dialect = True; else: break; if found_dialect == True: args += ('-x', language);", "== True: if hasattr(envar, 'FileTypes'): file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype); file_types = file_ref_spec.inheritedTypes(); skip_file =", "to the linkfilelist print >> link_file_list_fd, output_file_path; args += ('-o', output_file_path) # this", "+= ' '; print '\\t'+args_str; print ''; # this is running the compiler", "compiler command # compiler_output = xcrun_helper.make_subprocess_call(args); # if compiler_output[1] != 0: # logging_helper.getLogger().error('[xcbuildsystem]:", "# if compiler_output[1] != 0: # logging_helper.getLogger().error('[xcbuildsystem]: Compiler error %s' % compiler_output[0]); link_file_list_fd.close();", "output object_file = file_name + '.o'; output_file_path = os.path.join(output_dir, object_file); # writing the", "unknown language used: \"%s\"' % (language)); # I think this should be handled", "flag != '\\'\\'': args_str += flag args_str += ' '; print '\\t'+args_str; print", "if hasattr(envar, 'FileTypes'): file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype); file_types = file_ref_spec.inheritedTypes(); skip_file = True; for", "output_file_path) # this is displaying the command being issued for this compiler in", "from ...Helpers import path_helper class clangcompiler(xccompiler): def __init__(self, compiler, config_dict): super(clangcompiler, self).__init__(compiler, config_dict);", "import os from .xccompiler import * from ...Helpers import logging_helper from ...Helpers import", "the build phase args_str = ''; for word in args: flag = str(word)", "skip_file == True: continue; result = envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings); if result != None and" ]
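The compile step itself is still commented out above; it calls an xcrun_helper.make_subprocess_call(args) whose body is not shown here. Judging only from how the result is used (compiler_output[1] != 0 signals failure, compiler_output[0] goes into the error message), the helper returns an (output, returncode) pair. A stand-in of that shape, as a sketch rather than the helper's real code:

import subprocess

def make_subprocess_call(args):
    # Drop the empty '' placeholder appended by the diag step; an empty
    # argv entry would be passed to the compiler as a bogus argument.
    argv = [a for a in args if a != '']
    # Merge stderr into stdout so element [0] carries all diagnostics,
    # and return the exit status as element [1].
    process = subprocess.Popen(argv, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    output, _ = process.communicate()
    return (output, process.returncode)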
[ "\\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with", "Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name", "\"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\", "+= \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\"", "= line[32:].strip() if l.startswith('-') or l.startswith('='): continue if l.startswith('Statistics for:'): table_name = l.split(\":\")[1].strip().replace(\"_\",", "open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) with open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w') as file: file.write(table_featue) idx +=", "'w') as file: file.write(table_stat) idx += 1 table_per = \"\" table_stat = \"\"", "if l.startswith('-') or l.startswith('='): continue if l.startswith('Statistics for:'): table_name = l.split(\":\")[1].strip().replace(\"_\", \"\\_\") if", "\"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\"", "main(): with open('train.log', 'r') as f: lines = f.readlines() table_name = \"\" table_per", "+table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w')", "pdb import re \"\"\" \"\"\" STOP_WORDS = [\"mean living area\", \"Renovation\", \"Noise level\",", "{} & {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"), percent) if l.startswith('I'): percent, value = l.split(':') table_per +=", "open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) idx += 1 table_per = \"\" table_stat =", "1: table_per += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% &", "\\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if feature_c >=", "Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if feature_c >= 1: table_featue += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3}", "dummy, *feature, percent = l.split() except Exception: pdb.set_trace() percent = float(percent.replace(\"(\", \"\").replace(\")\", \"\"))", "\\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"}", "as file: file.write(table_featue) idx += 1 table_per = \"\" table_stat = \"\" table_featue", "l = line[32:].strip() if l.startswith('-') or l.startswith('='): continue if l.startswith('Statistics for:'): table_name =", "0.0001: table_featue += \"\"\" {} & {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"), percent) if l.startswith('I'): percent, value", "\\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} 
\\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if re.match(regex, l): try:", "if l.startswith('Statistics for:'): table_name = l.split(\":\")[1].strip().replace(\"_\", \"\\_\") if table_name == \"adaboost\": table_name =", "0 idx = 0 for line in lines: if line.startswith(\"BREAK\"): table_per += \"\"\"\\end{table*}", "table_per += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\%", "line[32:].strip() if l.startswith('-') or l.startswith('='): continue if l.startswith('Statistics for:'): table_name = l.split(\":\")[1].strip().replace(\"_\", \"\\_\")", "\"ExtraTree\\_train\": table_name = \"Extra Trees\" if counter == 0: table_per += \"\"\" \\\\begin{table*}[ht]", "\"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}}", "\\midrule\"\"\" if re.match(regex, l): try: nr, dummy, *feature, percent = l.split() except Exception:", "\\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage}", "idx = 0 for line in lines: if line.startswith(\"BREAK\"): table_per += \"\"\"\\end{table*} \"\"\"", "& Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule", "= 0 table_stat += \"\"\"\\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with", "counter >= 2: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\"", "if feature_c >= 1: table_featue += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature", "file.write(table_featue) idx += 1 table_per = \"\" table_stat = \"\" table_featue = \"\"", "\\end{minipage} \"\"\" elif counter >= 2: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"}", ">= 2: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_stat", "{} & {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\", \"\"), float(value.split(\"%\")[0])) if l.startswith('Feature Ranking'): if feature_c == 0:", "table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_stat += \"\"\" \\\\bottomrule", "table_stat += \"\"\"\\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w')", "Tags\"] STATS_WORD = [\"R²-Score:\", \"MAPE:\", \"MdAPE:\", \"Min\", \"Max\", \"Max\", \"Mean\", \"Median\", \"Mean\"] regex", "STATS_WORD: name, value = l.split(':') table_stat += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\", \"\"),", "\\\\toprule Abweichung in \\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering", "\"\" table_stat = \"\" table_featue = \"\" feature_c = 0 counter = 0", "\"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} 
\"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\"", "<filename>log2tex.py import pdb import re \"\"\" \"\"\" STOP_WORDS = [\"mean living area\", \"Renovation\",", "= l.split() except Exception: pdb.set_trace() percent = float(percent.replace(\"(\", \"\").replace(\")\", \"\")) if percent >", "\"AdaBoost\" elif table_name == \"xgb\": table_name = \"XGBoost\" elif table_name == \"ExtraTree\\_train\": table_name", "with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) idx", "\"Without Tags\"] STATS_WORD = [\"R²-Score:\", \"MAPE:\", \"MdAPE:\", \"Min\", \"Max\", \"Max\", \"Mean\", \"Median\", \"Mean\"]", "re.match(regex, l): try: nr, dummy, *feature, percent = l.split() except Exception: pdb.set_trace() percent", "= [\"mean living area\", \"Renovation\", \"Noise level\", \"Outlier detection\", \"Steuerfuss\", \"Tags gruppieren\", \"Stacked", "\"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" elif counter >=", "\\\\\\\\ \\midrule\"\"\" if l.split()[0] in STATS_WORD: name, value = l.split(':') table_stat += \"\"\"", "Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}}", "\\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"}", "+table_name+\"\"\"} \\end{minipage} \"\"\" elif counter >= 2: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\"", "\"\" table_per = \"\" table_stat = \"\" table_featue = \"\" feature_c = 0", "file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) with open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w') as file:", "STATS_WORD = [\"R²-Score:\", \"MAPE:\", \"MdAPE:\", \"Min\", \"Max\", \"Max\", \"Mean\", \"Median\", \"Mean\"] regex =", "\\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\"", "with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) with open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w') as file: file.write(table_featue) idx", "\"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file:", "+= \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in", "table_featue += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\", "& {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\", \"\"), float(value.split(\"%\")[0])) if l.startswith('Feature Ranking'): if feature_c == 0: table_featue", "\\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in 
\\%\\\\\\\\ \\midrule\"\"\"", "\"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_featue +=", "\"xgb\": table_name = \"XGBoost\" elif table_name == \"ExtraTree\\_train\": table_name = \"Extra Trees\" if", "if table_name == \"adaboost\": table_name = \"AdaBoost\" elif table_name == \"xgb\": table_name =", "\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% & Abdeckung in \\%\\\\\\\\", "\\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name", "\\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file:", "file: file.write(table_stat) with open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w') as file: file.write(table_featue) idx += 1 table_per =", "\\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"}", "\"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_featue += \"\"\"", "& Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if re.match(regex, l): try: nr, dummy, *feature, percent", "Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if feature_c >= 1: table_featue += \"\"\"\\\\begin{minipage}{.3\\\\textwidth}", "\"\"\"\\end{table*} \"\"\" counter = 0 table_stat += \"\"\"\\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as", "\"Min\", \"Max\", \"Max\", \"Mean\", \"Median\", \"Mean\"] regex = re.compile('^([0-9]{1,2}?)') def main(): with open('train.log',", "\\\\toprule Abweichung in \\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\" \\\\begin{table*}[ht]", "1 table_per = \"\" table_stat = \"\" table_featue = \"\" counter = (counter+1)", "\"\"\"\\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file:", "\\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage}", "*feature, percent = l.split() except Exception: pdb.set_trace() percent = float(percent.replace(\"(\", \"\").replace(\")\", \"\")) if", "+= \"\"\" {} & {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"), percent) if l.startswith('I'): percent, value = l.split(':')", "\\midrule\"\"\" if feature_c >= 1: table_featue += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule", "in STATS_WORD: name, value = l.split(':') table_stat += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\",", "\"\" table_featue = \"\" feature_c = 0 counter = 0 idx = 0", "0 counter = 0 idx = 0 for line in lines: if line.startswith(\"BREAK\"):", "feature_c >= 1: table_featue += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature &", "= 
\"\" table_featue = \"\" counter = (counter+1) % 3 if __name__ ==", "\\midrule\"\"\" if l.split()[0] in STATS_WORD: name, value = l.split(':') table_stat += \"\"\" {}", "table_stat += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\", \"\"), float(value.split(\"%\")[0])) if l.startswith('Feature Ranking'): if", "import re \"\"\" \"\"\" STOP_WORDS = [\"mean living area\", \"Renovation\", \"Noise level\", \"Outlier", "+= 1 table_per = \"\" table_stat = \"\" continue l = line[32:].strip() if", "+= \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}}", "counter <= 1: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_stat", "re \"\"\" \"\"\" STOP_WORDS = [\"mean living area\", \"Renovation\", \"Noise level\", \"Outlier detection\",", "\"\"\" \"\"\" STOP_WORDS = [\"mean living area\", \"Renovation\", \"Noise level\", \"Outlier detection\", \"Steuerfuss\",", "Exception: pdb.set_trace() percent = float(percent.replace(\"(\", \"\").replace(\")\", \"\")) if percent > 0.0001: table_featue +=", "\\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if feature_c", "for line in lines: if line.startswith(\"BREAK\"): table_per += \"\"\"\\end{table*} \"\"\" counter = 0", "as file: file.write(table_stat) idx += 1 table_per = \"\" table_stat = \"\" continue", "= [\"R²-Score:\", \"MAPE:\", \"MdAPE:\", \"Min\", \"Max\", \"Max\", \"Mean\", \"Median\", \"Mean\"] regex = re.compile('^([0-9]{1,2}?)')", "\\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert", "table_name = \"\" table_per = \"\" table_stat = \"\" table_featue = \"\" feature_c", "\\midrule\"\"\" if counter >= 1: table_per += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule", "table_name = l.split(\":\")[1].strip().replace(\"_\", \"\\_\") if table_name == \"adaboost\": table_name = \"AdaBoost\" elif table_name", "\"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if", "\\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}}", "Abweichung in \\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3}", "value = l.split(':') table_stat += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\", \"\"), float(value.split(\"%\")[0])) if", "file: file.write(table_featue) idx += 1 table_per = \"\" table_stat = \"\" table_featue =", "\"\").replace(\")\", \"\")) if percent > 0.0001: table_featue += \"\"\" {} & {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"),", "l.split(\":\")[1].strip().replace(\"_\", \"\\_\") if table_name == \"adaboost\": table_name = \"AdaBoost\" elif table_name == \"xgb\":", "\"\"\" {} & {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"), percent) if l.startswith('I'): percent, value = l.split(':') table_per", "1 table_per = \"\" 
table_stat = \"\" continue l = line[32:].strip() if l.startswith('-')", "with open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w') as file: file.write(table_featue) idx += 1 table_per = \"\" table_stat", "\\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\" if l.split()[0]", "table_stat = \"\" table_featue = \"\" counter = (counter+1) % 3 if __name__", "= \"\" table_stat = \"\" continue l = line[32:].strip() if l.startswith('-') or l.startswith('='):", "Ranking'): if feature_c == 0: table_featue += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{", "\\end{minipage} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_featue +=", "except Exception: pdb.set_trace() percent = float(percent.replace(\"(\", \"\").replace(\")\", \"\")) if percent > 0.0001: table_featue", "\"\"\" counter = 0 table_stat += \"\"\"\\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file:", "\\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\" if counter >= 1: table_per", "feature_c = 0 counter = 0 idx = 0 for line in lines:", "\"\" table_featue = \"\" counter = (counter+1) % 3 if __name__ == \"__main__\":", "= f.readlines() table_name = \"\" table_per = \"\" table_stat = \"\" table_featue =", "pdb.set_trace() percent = float(percent.replace(\"(\", \"\").replace(\")\", \"\")) if percent > 0.0001: table_featue += \"\"\"", "\\\\\\\\ \\midrule\"\"\" if counter >= 1: table_per += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}}", "\"Noise level\", \"Outlier detection\", \"Steuerfuss\", \"Tags gruppieren\", \"Stacked model\", \"Without Tags\"] STATS_WORD =", "try: nr, dummy, *feature, percent = l.split() except Exception: pdb.set_trace() percent = float(percent.replace(\"(\",", "regex = re.compile('^([0-9]{1,2}?)') def main(): with open('train.log', 'r') as f: lines = f.readlines()", "Name & Wert in \\\\\\\\ \\midrule\"\"\" if counter >= 1: table_per += \"\"\"\\\\begin{minipage}{.3\\\\textwidth}", "if l.startswith('I'): percent, value = l.split(':') table_per += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1],", "table_per = \"\" table_stat = \"\" table_featue = \"\" counter = (counter+1) %", "\"MAPE:\", \"MdAPE:\", \"Min\", \"Max\", \"Max\", \"Mean\", \"Median\", \"Mean\"] regex = re.compile('^([0-9]{1,2}?)') def main():", "counter >= 1: table_per += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in", "== \"xgb\": table_name = \"XGBoost\" elif table_name == \"ExtraTree\\_train\": table_name = \"Extra Trees\"", "level\", \"Outlier detection\", \"Steuerfuss\", \"Tags gruppieren\", \"Stacked model\", \"Without Tags\"] STATS_WORD = [\"R²-Score:\",", "= float(percent.replace(\"(\", \"\").replace(\")\", \"\")) if percent > 0.0001: table_featue += \"\"\" {} &", "= \"\" table_per = \"\" table_stat = \"\" table_featue = \"\" feature_c =", "\\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\"", "for:'): table_name = l.split(\":\")[1].strip().replace(\"_\", \"\\_\") if table_name == \"adaboost\": table_name = 
\"AdaBoost\" elif", "if percent > 0.0001: table_featue += \"\"\" {} & {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"), percent) if", "\"Steuerfuss\", \"Tags gruppieren\", \"Stacked model\", \"Without Tags\"] STATS_WORD = [\"R²-Score:\", \"MAPE:\", \"MdAPE:\", \"Min\",", "with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) with", "1: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_stat += \"\"\"", "STOP_WORDS = [\"mean living area\", \"Renovation\", \"Noise level\", \"Outlier detection\", \"Steuerfuss\", \"Tags gruppieren\",", "= \"\" table_featue = \"\" feature_c = 0 counter = 0 idx =", "name, value = l.split(':') table_stat += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\", \"\"), float(value.split(\"%\")[0]))", "+= \"\"\"\\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as", "2: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_stat +=", "+= \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" elif counter >= 2: table_per", "table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_featue += \"\"\"", "\"\"\" STOP_WORDS = [\"mean living area\", \"Renovation\", \"Noise level\", \"Outlier detection\", \"Steuerfuss\", \"Tags", "\"\"), float(value.split(\"%\")[0])) if l.startswith('Feature Ranking'): if feature_c == 0: table_featue += \"\"\" \\\\begin{table*}[ht]", "\\end{minipage} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" elif counter", "\"\").replace(\"%\", \"\"), float(value.split(\"%\")[0])) if l.startswith('Feature Ranking'): if feature_c == 0: table_featue += \"\"\"", "if l.split()[0] in STATS_WORD: name, value = l.split(':') table_stat += \"\"\" {} &", "table_per = \"\" table_stat = \"\" continue l = line[32:].strip() if l.startswith('-') or", "+= \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_featue += \"\"\" \\\\bottomrule", "float(value.split(\"%\")[0])) if l.startswith('PLOT NR'): if counter <= 1: table_per += \"\"\" \\\\bottomrule \\end{tabular}}", "f: lines = f.readlines() table_name = \"\" table_per = \"\" table_stat = \"\"", "table_stat += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\", "{:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\", \"\"), float(value.split(\"%\")[0])) if l.startswith('Feature Ranking'): if feature_c == 0: table_featue +=", "\"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx),", "\\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\" if l.split()[0] in STATS_WORD: name,", "= 0 counter = 0 idx = 0 for line in lines: if", "l.startswith('-') or l.startswith('='): continue if l.startswith('Statistics 
for:'): table_name = l.split(\":\")[1].strip().replace(\"_\", \"\\_\") if table_name", "\\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage}", "in \\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{", "+= \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as", "table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w')", "0: table_featue += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature &", ">= 1: table_per += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\%", "\"Max\", \"Mean\", \"Median\", \"Mean\"] regex = re.compile('^([0-9]{1,2}?)') def main(): with open('train.log', 'r') as", "\"Mean\"] regex = re.compile('^([0-9]{1,2}?)') def main(): with open('train.log', 'r') as f: lines =", "= l.split(':') table_per += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1], float(value.split(\"%\")[0])) if l.startswith('PLOT NR'):", "\\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat +=", "& Wert in \\\\\\\\ \\midrule\"\"\" if counter >= 1: table_per += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering", "elif counter >= 2: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*}", "\\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" elif counter >= 2: table_per += \"\"\"", "as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) idx += 1 table_per", "\\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if feature_c >= 1: table_featue", "+table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*}", "\\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\"", "\\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\" if l.split()[0] in STATS_WORD: name, value", "open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) with open(\"./report/attachments/feature_impl{}.tex\".format(idx),", "+= \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_stat += \"\"\" \\\\bottomrule", "\"adaboost\": table_name = \"AdaBoost\" elif table_name == \"xgb\": table_name = \"XGBoost\" elif table_name", "percent = float(percent.replace(\"(\", \"\").replace(\")\", \"\")) if percent > 0.0001: table_featue += \"\"\" {}", "\"Extra Trees\" if counter == 0: table_per += \"\"\" 
\\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3}", "'w') as file: file.write(table_featue) idx += 1 table_per = \"\" table_stat = \"\"", "\\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if re.match(regex,", "= re.compile('^([0-9]{1,2}?)') def main(): with open('train.log', 'r') as f: lines = f.readlines() table_name", "\"Max\", \"Max\", \"Mean\", \"Median\", \"Mean\"] regex = re.compile('^([0-9]{1,2}?)') def main(): with open('train.log', 'r')", "l.startswith('Feature Ranking'): if feature_c == 0: table_featue += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3}", "table_per += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% & Abdeckung", "\\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage}", "Abweichung in \\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth}", "as f: lines = f.readlines() table_name = \"\" table_per = \"\" table_stat =", "model\", \"Without Tags\"] STATS_WORD = [\"R²-Score:\", \"MAPE:\", \"MdAPE:\", \"Min\", \"Max\", \"Max\", \"Mean\", \"Median\",", "= \"XGBoost\" elif table_name == \"ExtraTree\\_train\": table_name = \"Extra Trees\" if counter ==", "\"\" table_stat = \"\" table_featue = \"\" counter = (counter+1) % 3 if", "table_featue += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung", "as file: file.write(table_stat) with open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w') as file: file.write(table_featue) idx += 1 table_per", "\"\" continue l = line[32:].strip() if l.startswith('-') or l.startswith('='): continue if l.startswith('Statistics for:'):", "\\end{table*} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" with", "\\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if re.match(regex, l): try: nr, dummy,", "+table_name+\"\"\"} \\end{minipage} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" elif", "+= \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% &", "\\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per)", "line in lines: if line.startswith(\"BREAK\"): table_per += \"\"\"\\end{table*} \"\"\" counter = 0 table_stat", "table_name == \"ExtraTree\\_train\": table_name = \"Extra Trees\" if counter == 0: table_per +=", "table_name == \"xgb\": table_name = \"XGBoost\" elif table_name == \"ExtraTree\\_train\": table_name = \"Extra", "as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) with open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w') as", "f.readlines() table_name = \"\" table_per = 
\"\" table_stat = \"\" table_featue = \"\"", "== 0: table_per += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung", "+table_name+\"\"\"} \\end{minipage} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_featue", "in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule", "= \"\" continue l = line[32:].strip() if l.startswith('-') or l.startswith('='): continue if l.startswith('Statistics", "\\%\\\\\\\\ \\midrule\"\"\" if re.match(regex, l): try: nr, dummy, *feature, percent = l.split() except", "\\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\" if counter >= 1: table_per +=", "area\", \"Renovation\", \"Noise level\", \"Outlier detection\", \"Steuerfuss\", \"Tags gruppieren\", \"Stacked model\", \"Without Tags\"]", "\\end{minipage} \\end{table*} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\"", "counter = 0 idx = 0 for line in lines: if line.startswith(\"BREAK\"): table_per", "'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) with open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w')", "\"Outlier detection\", \"Steuerfuss\", \"Tags gruppieren\", \"Stacked model\", \"Without Tags\"] STATS_WORD = [\"R²-Score:\", \"MAPE:\",", "table_per += \"\"\"\\end{table*} \"\"\" counter = 0 table_stat += \"\"\"\\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx),", "\\midrule\"\"\" table_stat += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in", "in \\\\\\\\ \\midrule\"\"\" if l.split()[0] in STATS_WORD: name, value = l.split(':') table_stat +=", "{} & {:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1], float(value.split(\"%\")[0])) if l.startswith('PLOT NR'): if counter <= 1: table_per", "\\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\" if counter", "& Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if feature_c >= 1: table_featue += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering", "\\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat", "table_featue += \"\"\" {} & {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"), percent) if l.startswith('I'): percent, value =", "{:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1], float(value.split(\"%\")[0])) if l.startswith('PLOT NR'): if counter <= 1: table_per += \"\"\"", "\\end{table*} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_featue", "0 for line in lines: if line.startswith(\"BREAK\"): table_per += \"\"\"\\end{table*} \"\"\" counter =", "Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if re.match(regex, l): try: nr, dummy, *feature, percent =", "re.compile('^([0-9]{1,2}?)') def main(): with open('train.log', 'r') as f: lines = f.readlines() table_name =", "continue if l.startswith('Statistics 
for:'): table_name = l.split(\":\")[1].strip().replace(\"_\", \"\\_\") if table_name == \"adaboost\": table_name", "l.startswith('I'): percent, value = l.split(':') table_per += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1], float(value.split(\"%\")[0]))", "\\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if feature_c >= 1: table_featue +=", "table_stat += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert", "gruppieren\", \"Stacked model\", \"Without Tags\"] STATS_WORD = [\"R²-Score:\", \"MAPE:\", \"MdAPE:\", \"Min\", \"Max\", \"Max\",", "l): try: nr, dummy, *feature, percent = l.split() except Exception: pdb.set_trace() percent =", "float(percent.replace(\"(\", \"\").replace(\")\", \"\")) if percent > 0.0001: table_featue += \"\"\" {} & {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\",", "\\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3}", "+= \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1], float(value.split(\"%\")[0])) if l.startswith('PLOT NR'): if counter <=", "& Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{", "table_name == \"adaboost\": table_name = \"AdaBoost\" elif table_name == \"xgb\": table_name = \"XGBoost\"", ">= 1: table_featue += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung", "NR'): if counter <= 1: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage}", "if counter == 0: table_per += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}}", "def main(): with open('train.log', 'r') as f: lines = f.readlines() table_name = \"\"", "\\%\\\\\\\\ \\midrule\"\"\" if feature_c >= 1: table_featue += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}}", "0: table_per += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in", "\\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\" if l.split()[0] in", "in \\%\\\\\\\\ \\midrule\"\"\" if feature_c >= 1: table_featue += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{", "'w') as file: file.write(table_stat) with open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w') as file: file.write(table_featue) idx += 1", "\\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if re.match(regex, l): try: nr,", "= 0 idx = 0 for line in lines: if line.startswith(\"BREAK\"): table_per +=", "table_featue = \"\" feature_c = 0 counter = 0 idx = 0 for", "\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\" if", "l.split() except Exception: pdb.set_trace() percent = float(percent.replace(\"(\", 
\"\").replace(\")\", \"\")) if percent > 0.0001:", "= \"\" table_stat = \"\" table_featue = \"\" counter = (counter+1) % 3", "\\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\" if counter >= 1:", "l.startswith('PLOT NR'): if counter <= 1: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"}", "continue l = line[32:].strip() if l.startswith('-') or l.startswith('='): continue if l.startswith('Statistics for:'): table_name", "\")[-1], float(value.split(\"%\")[0])) if l.startswith('PLOT NR'): if counter <= 1: table_per += \"\"\" \\\\bottomrule", "percent > 0.0001: table_featue += \"\"\" {} & {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"), percent) if l.startswith('I'):", "\\midrule\"\"\" table_stat += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name &", "[\"R²-Score:\", \"MAPE:\", \"MdAPE:\", \"Min\", \"Max\", \"Max\", \"Mean\", \"Median\", \"Mean\"] regex = re.compile('^([0-9]{1,2}?)') def", "table_stat = \"\" table_featue = \"\" feature_c = 0 counter = 0 idx", "== \"adaboost\": table_name = \"AdaBoost\" elif table_name == \"xgb\": table_name = \"XGBoost\" elif", "\"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\", "with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) idx += 1 table_per = \"\" table_stat", "\\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\"\\\\begin{minipage}{.3\\\\textwidth}", "\"\"\" elif counter >= 2: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage}", "table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" elif counter >= 2:", "\"\" feature_c = 0 counter = 0 idx = 0 for line in", "> 0.0001: table_featue += \"\"\" {} & {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"), percent) if l.startswith('I'): percent,", "+= \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% & Abdeckung in", "'r') as f: lines = f.readlines() table_name = \"\" table_per = \"\" table_stat", "\\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" elif counter >= 2: table_per += \"\"\" \\\\bottomrule", "\\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" elif counter >= 2: table_per += \"\"\" \\\\bottomrule \\end{tabular}}", "= \"AdaBoost\" elif table_name == \"xgb\": table_name = \"XGBoost\" elif table_name == \"ExtraTree\\_train\":", "table_stat = \"\" continue l = line[32:].strip() if l.startswith('-') or l.startswith('='): continue if", "'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) idx += 1", "\\end{minipage} \\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as", "& {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"), percent) if l.startswith('I'): percent, value = l.split(':') table_per += 
\"\"\"", "lines: if line.startswith(\"BREAK\"): table_per += \"\"\"\\end{table*} \"\"\" counter = 0 table_stat += \"\"\"\\end{table*}", "elif table_name == \"ExtraTree\\_train\": table_name = \"Extra Trees\" if counter == 0: table_per", "0 table_stat += \"\"\"\\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx),", "in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name &", "table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_stat += \"\"\"", "\"MdAPE:\", \"Min\", \"Max\", \"Max\", \"Mean\", \"Median\", \"Mean\"] regex = re.compile('^([0-9]{1,2}?)') def main(): with", "= 0 for line in lines: if line.startswith(\"BREAK\"): table_per += \"\"\"\\end{table*} \"\"\" counter", "value = l.split(':') table_per += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1], float(value.split(\"%\")[0])) if l.startswith('PLOT", "l.split(':') table_stat += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\", \"\"), float(value.split(\"%\")[0])) if l.startswith('Feature Ranking'):", "in \\\\\\\\ \\midrule\"\"\" if counter >= 1: table_per += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{", "l.split()[0] in STATS_WORD: name, value = l.split(':') table_stat += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\",", "file: file.write(table_stat) idx += 1 table_per = \"\" table_stat = \"\" continue l", "file.write(table_stat) with open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w') as file: file.write(table_featue) idx += 1 table_per = \"\"", "file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) with open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w') as file: file.write(table_featue)", "in \\% & Abdeckung in \\%\\\\\\\\ \\midrule\"\"\" table_stat += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering", "percent) if l.startswith('I'): percent, value = l.split(':') table_per += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(percent.split(\"", "\\end{minipage} \\end{table*} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\"", "== \"ExtraTree\\_train\": table_name = \"Extra Trees\" if counter == 0: table_per += \"\"\"", "detection\", \"Steuerfuss\", \"Tags gruppieren\", \"Stacked model\", \"Without Tags\"] STATS_WORD = [\"R²-Score:\", \"MAPE:\", \"MdAPE:\",", "if l.startswith('PLOT NR'): if counter <= 1: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\"", "elif table_name == \"xgb\": table_name = \"XGBoost\" elif table_name == \"ExtraTree\\_train\": table_name =", "\\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"}", "\"Mean\", \"Median\", \"Mean\"] regex = re.compile('^([0-9]{1,2}?)') def main(): with open('train.log', 'r') as f:", "if l.startswith('Feature Ranking'): if feature_c == 0: table_featue += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering", "+= 1 table_per = \"\" table_stat = \"\" 
table_featue = \"\" counter =", "\\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% & Abdeckung in", "counter == 0: table_per += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule", "line.startswith(\"BREAK\"): table_per += \"\"\"\\end{table*} \"\"\" counter = 0 table_stat += \"\"\"\\end{table*} \"\"\" with", "\"\\_\") if table_name == \"adaboost\": table_name = \"AdaBoost\" elif table_name == \"xgb\": table_name", "\"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" elif counter >= 2: table_per +=", "or l.startswith('='): continue if l.startswith('Statistics for:'): table_name = l.split(\":\")[1].strip().replace(\"_\", \"\\_\") if table_name ==", "open(\"./report/attachments/feature_impl{}.tex\".format(idx), 'w') as file: file.write(table_featue) idx += 1 table_per = \"\" table_stat =", "lines = f.readlines() table_name = \"\" table_per = \"\" table_stat = \"\" table_featue", "\"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat)", "if counter <= 1: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\"", "living area\", \"Renovation\", \"Noise level\", \"Outlier detection\", \"Steuerfuss\", \"Tags gruppieren\", \"Stacked model\", \"Without", "Trees\" if counter == 0: table_per += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{", "\"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1], float(value.split(\"%\")[0])) if l.startswith('PLOT NR'): if counter <= 1:", "\"XGBoost\" elif table_name == \"ExtraTree\\_train\": table_name = \"Extra Trees\" if counter == 0:", "\"\" table_stat = \"\" continue l = line[32:].strip() if l.startswith('-') or l.startswith('='): continue", "\\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\" if counter >=", "\"Stacked model\", \"Without Tags\"] STATS_WORD = [\"R²-Score:\", \"MAPE:\", \"MdAPE:\", \"Min\", \"Max\", \"Max\", \"Mean\",", "open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) idx +=", "if line.startswith(\"BREAK\"): table_per += \"\"\"\\end{table*} \"\"\" counter = 0 table_stat += \"\"\"\\end{table*} \"\"\"", "open('train.log', 'r') as f: lines = f.readlines() table_name = \"\" table_per = \"\"", "percent = l.split() except Exception: pdb.set_trace() percent = float(percent.replace(\"(\", \"\").replace(\")\", \"\")) if percent", "\"Renovation\", \"Noise level\", \"Outlier detection\", \"Steuerfuss\", \"Tags gruppieren\", \"Stacked model\", \"Without Tags\"] STATS_WORD", "Wert in \\\\\\\\ \\midrule\"\"\" if counter >= 1: table_per += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3}", "table_per += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1], float(value.split(\"%\")[0])) if l.startswith('PLOT NR'): if counter", "\\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ 
\\midrule\"\"\" if l.split()[0] in STATS_WORD:", "counter = 0 table_stat += \"\"\"\\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per)", "Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if re.match(regex, l): try: nr, dummy, *feature,", "file.write(table_stat) idx += 1 table_per = \"\" table_stat = \"\" continue l =", "in lines: if line.startswith(\"BREAK\"): table_per += \"\"\"\\end{table*} \"\"\" counter = 0 table_stat +=", "table_featue = \"\" counter = (counter+1) % 3 if __name__ == \"__main__\": main()", "\"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\", \"\"), float(value.split(\"%\")[0])) if l.startswith('Feature Ranking'): if feature_c ==", "+table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*}", "\"\\_\"), percent) if l.startswith('I'): percent, value = l.split(':') table_per += \"\"\" {} &", "table_name = \"Extra Trees\" if counter == 0: table_per += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth}", "file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) idx += 1 table_per = \"\"", "= l.split(\":\")[1].strip().replace(\"_\", \"\\_\") if table_name == \"adaboost\": table_name = \"AdaBoost\" elif table_name ==", "idx += 1 table_per = \"\" table_stat = \"\" continue l = line[32:].strip()", "{:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"), percent) if l.startswith('I'): percent, value = l.split(':') table_per += \"\"\" {}", "+= \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}}", "\\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if feature_c >= 1:", "\"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% & Abdeckung in \\%\\\\\\\\", "if counter >= 1: table_per += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung", "\\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_featue += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\"", "\"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}}", "with open('train.log', 'r') as f: lines = f.readlines() table_name = \"\" table_per =", "\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if", "\"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\" if", "== 0: table_featue += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature", "feature_c == 0: table_featue += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ 
\\\\begin{tabular}{@{}lr@{}} \\\\toprule", "l.startswith('='): continue if l.startswith('Statistics for:'): table_name = l.split(\":\")[1].strip().replace(\"_\", \"\\_\") if table_name == \"adaboost\":", "\"\")) if percent > 0.0001: table_featue += \"\"\" {} & {:.5f}\\\\\\\\\"\"\".format(feature[0].replace(\"_\", \"\\_\"), percent)", "float(value.split(\"%\")[0])) if l.startswith('Feature Ranking'): if feature_c == 0: table_featue += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth}", "+= \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\", \"\"), float(value.split(\"%\")[0])) if l.startswith('Feature Ranking'): if feature_c", "table_name = \"XGBoost\" elif table_name == \"ExtraTree\\_train\": table_name = \"Extra Trees\" if counter", "\\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in \\%\\\\\\\\ \\midrule\"\"\" if re.match(regex, l):", "\"Median\", \"Mean\"] regex = re.compile('^([0-9]{1,2}?)') def main(): with open('train.log', 'r') as f: lines", "\\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\"", "= \"\" feature_c = 0 counter = 0 idx = 0 for line", "l.startswith('Statistics for:'): table_name = l.split(\":\")[1].strip().replace(\"_\", \"\\_\") if table_name == \"adaboost\": table_name = \"AdaBoost\"", "file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx), 'w') as file: file.write(table_stat) idx += 1 table_per =", "Wert in \\\\\\\\ \\midrule\"\"\" if l.split()[0] in STATS_WORD: name, value = l.split(':') table_stat", "<= 1: table_per += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_stat +=", "= \"\" table_stat = \"\" table_featue = \"\" feature_c = 0 counter =", "\\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\"", "+= \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in", "l.split(':') table_per += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1], float(value.split(\"%\")[0])) if l.startswith('PLOT NR'): if", "in \\%\\\\\\\\ \\midrule\"\"\" if re.match(regex, l): try: nr, dummy, *feature, percent = l.split()", "\\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\"", "& Wert in \\\\\\\\ \\midrule\"\"\" if l.split()[0] in STATS_WORD: name, value = l.split(':')", "table_per = \"\" table_stat = \"\" table_featue = \"\" feature_c = 0 counter", "\\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w') as file: file.write(table_per) with open(\"./report/attachments/ml_results2_{}.tex\".format(idx),", "+= \"\"\"\\end{table*} \"\"\" counter = 0 table_stat += \"\"\"\\end{table*} \"\"\" with open(\"./report/attachments/ml_results_{}.tex\".format(idx), 'w')", "= l.split(':') table_stat += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(name.replace(\":\", \"\").replace(\"%\", \"\"), float(value.split(\"%\")[0])) if l.startswith('Feature", "if feature_c == 0: table_featue += \"\"\" 
\\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}}", "\"Tags gruppieren\", \"Stacked model\", \"Without Tags\"] STATS_WORD = [\"R²-Score:\", \"MAPE:\", \"MdAPE:\", \"Min\", \"Max\",", "& {:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1], float(value.split(\"%\")[0])) if l.startswith('PLOT NR'): if counter <= 1: table_per +=", "nr, dummy, *feature, percent = l.split() except Exception: pdb.set_trace() percent = float(percent.replace(\"(\", \"\").replace(\")\",", "idx += 1 table_per = \"\" table_stat = \"\" table_featue = \"\" counter", "percent, value = l.split(':') table_per += \"\"\" {} & {:.2f}\\\\\\\\\"\"\".format(percent.split(\" \")[-1], float(value.split(\"%\")[0])) if", "1: table_featue += \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Feature & Gewichtung in", "Name & Wert in \\\\\\\\ \\midrule\"\"\" if l.split()[0] in STATS_WORD: name, value =", "[\"mean living area\", \"Renovation\", \"Noise level\", \"Outlier detection\", \"Steuerfuss\", \"Tags gruppieren\", \"Stacked model\",", "= \"Extra Trees\" if counter == 0: table_per += \"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering", "+= \"\"\"\\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Name & Wert in \\\\\\\\ \\midrule\"\"\"", "import pdb import re \"\"\" \"\"\" STOP_WORDS = [\"mean living area\", \"Renovation\", \"Noise", "table_stat += \"\"\" \\\\bottomrule \\end{tabular}} \\caption{\"\"\" +table_name+\"\"\"} \\end{minipage} \"\"\" table_featue += \"\"\" \\\\bottomrule", "table_name = \"AdaBoost\" elif table_name == \"xgb\": table_name = \"XGBoost\" elif table_name ==", "\"\"\" \\\\begin{table*}[ht] \\\\begin{minipage}{.3\\\\textwidth} \\centering \\\\ra{1.3} \\\\resizebox{\\\\textwidth}{!}{ \\\\begin{tabular}{@{}lr@{}} \\\\toprule Abweichung in \\% & Abdeckung", "if re.match(regex, l): try: nr, dummy, *feature, percent = l.split() except Exception: pdb.set_trace()" ]
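# The parser above implies a rough shape for train.log; the excerpt below is
# a hypothetical sketch only (prefix width, token layout and label names are
# assumptions inferred from the string handling, not taken from a real log):
#
#   <32-character log prefix>Statistics for: xgb
#   <32-character log prefix>MAPE: 12.34%
#   <32-character log prefix>Inside 5%: 34.50%
#   <32-character log prefix>Feature Ranking:
#   <32-character log prefix>1. feature living_area (0.41230)
#   <32-character log prefix>PLOT NR 1
#   BREAK
#
# Note that "BREAK" is matched against the raw line before the prefix is
# stripped, so it would have to appear at column 0 in the actual log.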
[ "assert(generalize_names(\"Eto'o, Samuel\", output_sep=', ') == 'etoo, s') assert(generalize_names(\"Eto'o, Samuel\", output_sep=', ') == 'etoo,", "assert(generalize_names('<NAME>', firstname_output_letters=2) == 'pozo jo') assert(generalize_names(\"<NAME>\", firstname_output_letters=2) == 'etoo sa') assert(generalize_names(\"Eto'o, Samuel\", firstname_output_letters=0)", "'etoo s') assert(generalize_names(\"Eto'o, Samuel\") == 'etoo s') assert(generalize_names(\"Eto'o, Samuel\") == 'etoo s') assert(generalize_names('Xavi')", "'etoo, s') assert(generalize_names(\"<NAME>, Robin\", output_sep=', ') == 'vanpersie, r') assert(generalize_names(\"<NAME>\", output_sep=', ') ==", "mlxtend.text import generalize_names def test_generalize_names(): assert(generalize_names(\"<NAME>\") == 'etoo s') assert(generalize_names(\"Eto'o, Samuel\") == 'etoo", "assert(generalize_names('<NAME>') == 'pozo j') assert(generalize_names('<NAME>') == 'pozo j') assert(generalize_names('<NAME>') == 'pozo j') assert(generalize_names('<NAME>',", "== 'pozo jo') assert(generalize_names(\"<NAME>\", firstname_output_letters=2) == 'etoo sa') assert(generalize_names(\"Eto'o, Samuel\", firstname_output_letters=0) == 'etoo')", "'vandervaart, r') assert(generalize_names(\"<NAME>, Rafael\", output_sep=', ') == 'vandervaart, r') assert(generalize_names(\"<NAME>\") == 'hamer b')", "assert(generalize_names(\"<NAME>\", firstname_output_letters=2) == 'etoo sa') assert(generalize_names(\"Eto'o, Samuel\", firstname_output_letters=0) == 'etoo') assert(generalize_names(\"Eto'o, Samuel\", output_sep=',", "') == 'vanpersie, r') assert(generalize_names(\"<NAME>\", output_sep=', ') == 'vandervaart, r') assert(generalize_names(\"<NAME>, Rafael\", output_sep=',", "r') assert(generalize_names(\"<NAME>\", output_sep=', ') == 'vanpersie, r') assert(generalize_names(\"<NAME>\", output_sep=', ') == 'vandervaart, r')", "') == 'vanpersie, r') assert(generalize_names(\"<NAME>\", output_sep=', ') == 'vanpersie, r') assert(generalize_names(\"<NAME>\", output_sep=', ')", "j') assert(generalize_names('<NAME>', firstname_output_letters=2) == 'pozo jo') assert(generalize_names(\"<NAME>\", firstname_output_letters=2) == 'etoo sa') assert(generalize_names(\"Eto'o, Samuel\",", "< (3, 0): from nose.plugins.skip import SkipTest raise SkipTest from mlxtend.text import generalize_names", "assert(generalize_names('<NAME>') == 'pozo j') assert(generalize_names('<NAME>') == 'pozo j') assert(generalize_names('<NAME>', firstname_output_letters=2) == 'pozo jo')", "Samuel\") == 'etoo s') assert(generalize_names('Xavi') == 'xavi') assert(generalize_names('<NAME>') == 'toure y') assert(generalize_names('<NAME>') ==", "r') assert(generalize_names(\"<NAME>\", output_sep=', ') == 'vandervaart, r') assert(generalize_names(\"<NAME>, Rafael\", output_sep=', ') == 'vandervaart,", "'etoo sa') assert(generalize_names(\"Eto'o, Samuel\", firstname_output_letters=0) == 'etoo') assert(generalize_names(\"Eto'o, Samuel\", output_sep=', ') == 'etoo,", "Samuel\", firstname_output_letters=0) == 'etoo') assert(generalize_names(\"Eto'o, Samuel\", output_sep=', ') == 'etoo, s') assert(generalize_names(\"Eto'o, Samuel\",", "'etoo') assert(generalize_names(\"Eto'o, Samuel\", output_sep=', ') == 'etoo, s') assert(generalize_names(\"Eto'o, Samuel\", output_sep=', ') ==", "assert(generalize_names('<NAME>') == 'toure y') assert(generalize_names('<NAME>') == 'pozo j') assert(generalize_names('<NAME>') == 'pozo j') assert(generalize_names('<NAME>')", "s') assert(generalize_names(\"Eto'o, Samuel\") == 
import sys

if sys.version_info < (3, 0):
    from nose.plugins.skip import SkipTest
    raise SkipTest

from mlxtend.text import generalize_names


def test_generalize_names():
    assert(generalize_names("<NAME>") == 'etoo s')
    assert(generalize_names("Eto'o, Samuel") == 'etoo s')
    assert(generalize_names("Eto'o, Samuel") == 'etoo s')
    assert(generalize_names('Xavi') == 'xavi')
    assert(generalize_names('<NAME>') == 'toure y')
    assert(generalize_names('<NAME>') == 'pozo j')
    assert(generalize_names('<NAME>') == 'pozo j')
    assert(generalize_names('<NAME>') == 'pozo j')
    assert(generalize_names('<NAME>') == 'pozo j')
    assert(generalize_names('<NAME>', firstname_output_letters=2) == 'pozo jo')
    assert(generalize_names("<NAME>", firstname_output_letters=2) == 'etoo sa')
    assert(generalize_names("Eto'o, Samuel", firstname_output_letters=0) == 'etoo')
    assert(generalize_names("Eto'o, Samuel", output_sep=', ') == 'etoo, s')
    assert(generalize_names("Eto'o, Samuel", output_sep=', ') == 'etoo, s')
    assert(generalize_names("<NAME>, Robin", output_sep=', ') == 'vanpersie, r')
    assert(generalize_names("<NAME>", output_sep=', ') == 'vanpersie, r')
    assert(generalize_names("<NAME>", output_sep=', ') == 'vanpersie, r')
    assert(generalize_names("<NAME>", output_sep=', ') == 'vandervaart, r')
    assert(generalize_names("<NAME>, Rafael", output_sep=', ') == 'vandervaart, r')
    assert(generalize_names("<NAME>") == 'hamer
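Taken together, the assertions above pin down the small API surface under test. A minimal usage sketch (outputs copied from the expected values in the assertions; the `<NAME>` placeholders stand for anonymized player names):

from mlxtend.text import generalize_names

# Default behaviour: lowercase surname plus the first letter of the first name.
generalize_names("Eto'o, Samuel")                              # -> 'etoo s'
# firstname_output_letters controls how many first-name letters are kept.
generalize_names("Eto'o, Samuel", firstname_output_letters=2)  # -> 'etoo sa'
generalize_names("Eto'o, Samuel", firstname_output_letters=0)  # -> 'etoo'
# output_sep sets the string placed between surname and first-name letters.
generalize_names("Eto'o, Samuel", output_sep=', ')             # -> 'etoo, s'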
[ "= uefi_firmware.AutoParser(file_content[fvh_index - 40 :]) if parser.type() is \"unknown\": return self._unsupported() firmware =", "continue print(f\"Current module: {module_name}\") dst = os.path.join(self.pe_dir, module_name) shutil.copy(module_path, dst) def dump_all(self) ->", "display in uefi_firmware module dumper = Dumper(fw_name, DIR_NAME, PE_DIR) if not dumper.dump_all(): exit()", "\"unknown\": return self._unsupported() firmware = parser.parse() firmware.dump(self.dir_name) return True def get_efi_images(fw_name) -> bool:", "colorama.init(autoreset=True) # for correct color display in uefi_firmware module dumper = Dumper(fw_name, DIR_NAME,", "module: {module_name}\") dst = os.path.join(self.pe_dir, module_name) shutil.copy(module_path, dst) def dump_all(self) -> bool: if", "no UI section, try to get a friendly name from the GUID database", "return False with open(self.fw_name, \"rb\") as fw: file_content = fw.read() parser = uefi_firmware.AutoParser(file_content)", "= self.get_unique_name(module_name) self.modules.append(module_name) return module_name @staticmethod def search_pe(d: str) -> list: return list(map(str,", "self._unsupported() firmware = parser.parse() firmware.dump(self.dir_name) return True def get_efi_images(fw_name) -> bool: \"\"\"get images", "= parser.parse() firmware.dump(self.dir_name) return True def get_efi_images(fw_name) -> bool: \"\"\"get images from firmware\"\"\"", "def __init__(self, fw_name, dir_name, pe_dir): self.fw_name = fw_name self.dir_name = dir_name self.pe_dir =", "os.path.isfile(self.fw_name): print(f\"[-] Check {self.fw_name} file\") return False with open(self.fw_name, \"rb\") as fw: file_content", "f\"{module_name}_{index:#d}\" index += 1 continue return unique_name def get_module_name(self, module_path: str) -> str:", "uefi_firmware.AutoParser(file_content) if parser.type() is \"unknown\": fvh_index = file_content.find(b\"_FVH\") if fvh_index < 0: return", "see https://github.com/binarly-io/efiXplorer/issues/11 index = 1 unique_name = module_name while True: if unique_name in", "-> str: # Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11 index = 1 unique_name =", "-> bool: print(\"[-] This type of binary is not supported\") return False def", "pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod def search_te(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.te\"))) def get_pe_files(self): pe_files", "for correct color display in uefi_firmware module dumper = Dumper(fw_name, DIR_NAME, PE_DIR) if", "dir_name, pe_dir): self.fw_name = fw_name self.dir_name = dir_name self.pe_dir = pe_dir self.modules =", "@staticmethod def search_pe(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod def search_te(d: str)", "r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" ) class Dumper: def __init__(self, fw_name, dir_name, pe_dir): self.fw_name = fw_name self.dir_name", "*.ui file ui_path = glob.glob(template)[0] with open(ui_path, \"rb\") as f: module_name = f.read()", "unique_name = module_name while True: if unique_name in self.modules: unique_name = f\"{module_name}_{index:#d}\" index", "module_name) shutil.copy(module_path, dst) def dump_all(self) -> bool: if not os.path.isfile(self.fw_name): print(f\"[-] Check {self.fw_name}", "te_files + pe_files: module_name = self.get_module_name(module_path) if not module_name: print(f\"Current module: unknown\") continue", "self.modules: unique_name = f\"{module_name}_{index:#d}\" index += 1 continue return unique_name 
def get_module_name(self, module_path:", "get_module_name(self, module_path: str) -> str: module_name = str() dir_name, _ = os.path.split(module_path) template", "fw: file_content = fw.read() parser = uefi_firmware.AutoParser(file_content) if parser.type() is \"unknown\": fvh_index =", "# for correct color display in uefi_firmware module dumper = Dumper(fw_name, DIR_NAME, PE_DIR)", "color display in uefi_firmware module dumper = Dumper(fw_name, DIR_NAME, PE_DIR) if not dumper.dump_all():", "module_name = f.read() module_name = module_name.decode(\"utf-16le\") module_name = self.get_unique_name(module_name[:-1]) self.modules.append(module_name) return module_name #", "is not supported\") return False def get_unique_name(self, module_name: str) -> str: # Get", "not module_name: module_name = module_guid module_name = self.get_unique_name(module_name) self.modules.append(module_name) return module_name @staticmethod def", "return module_name # no UI section, try to get a friendly name from", "print(f\"Current module: {module_name}\") dst = os.path.join(self.pe_dir, module_name) shutil.copy(module_path, dst) def dump_all(self) -> bool:", ":]) if parser.type() is \"unknown\": return self._unsupported() firmware = parser.parse() firmware.dump(self.dir_name) return True", "return True def get_efi_images(fw_name) -> bool: \"\"\"get images from firmware\"\"\" colorama.init(autoreset=True) # for", "self.modules.append(module_name) return module_name @staticmethod def search_pe(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod", "import UEFI_GUIDS DIR_NAME = \"all\" PE_DIR = \"modules\" g_re_guid = re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" )", "f: module_name = f.read() module_name = module_name.decode(\"utf-16le\") module_name = self.get_unique_name(module_name[:-1]) self.modules.append(module_name) return module_name", "{self.fw_name} file\") return False with open(self.fw_name, \"rb\") as fw: file_content = fw.read() parser", "index += 1 continue return unique_name def get_module_name(self, module_path: str) -> str: module_name", "with open(self.fw_name, \"rb\") as fw: file_content = fw.read() parser = uefi_firmware.AutoParser(file_content) if parser.type()", "module_name.decode(\"utf-16le\") module_name = self.get_unique_name(module_name[:-1]) self.modules.append(module_name) return module_name # no UI section, try to", "module_path in te_files + pe_files: module_name = self.get_module_name(module_path) if not module_name: print(f\"Current module:", "glob.glob(template)[0] with open(ui_path, \"rb\") as f: module_name = f.read() module_name = module_name.decode(\"utf-16le\") module_name", "Check {self.fw_name} file\") return False with open(self.fw_name, \"rb\") as fw: file_content = fw.read()", "images from firmware\"\"\" colorama.init(autoreset=True) # for correct color display in uefi_firmware module dumper", "str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod def search_te(d: str) -> list: return", "section, try to get a friendly name from the GUID database file_guids =", "file_content = fw.read() parser = uefi_firmware.AutoParser(file_content) if parser.type() is \"unknown\": fvh_index = file_content.find(b\"_FVH\")", "return unique_name def get_module_name(self, module_path: str) -> str: module_name = str() dir_name, _", "== 1: # try to get a friendly name from the *.ui file", "= dir_name self.pe_dir = pe_dir self.modules = list() if not os.path.isdir(self.dir_name): 
os.mkdir(self.dir_name) if", "parser = uefi_firmware.AutoParser(file_content) if parser.type() is \"unknown\": fvh_index = file_content.find(b\"_FVH\") if fvh_index <", "list() if not os.path.isdir(self.dir_name): os.mkdir(self.dir_name) if not os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir) @staticmethod def _unsupported() ->", "from .guid_db import UEFI_GUIDS DIR_NAME = \"all\" PE_DIR = \"modules\" g_re_guid = re.compile(", "try to get a friendly name from the *.ui file ui_path = glob.glob(template)[0]", "# Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11 index = 1 unique_name = module_name while", "1: # try to get a friendly name from the *.ui file ui_path", "= str() dir_name, _ = os.path.split(module_path) template = os.path.join(dir_name, \"*.ui\") if len(glob.glob(template)) ==", "fw_name, dir_name, pe_dir): self.fw_name = fw_name self.dir_name = dir_name self.pe_dir = pe_dir self.modules", "= re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" ) class Dumper: def __init__(self, fw_name, dir_name, pe_dir): self.fw_name =", "if unique_name in self.modules: unique_name = f\"{module_name}_{index:#d}\" index += 1 continue return unique_name", "Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11 index = 1 unique_name = module_name while True:", "self.pe_dir = pe_dir self.modules = list() if not os.path.isdir(self.dir_name): os.mkdir(self.dir_name) if not os.path.isdir(self.pe_dir):", "False def get_unique_name(self, module_name: str) -> str: # Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11", "def search_te(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.te\"))) def get_pe_files(self): pe_files = self.search_pe(self.dir_name)", "= g_re_guid.findall(dir_name) if not file_guids: return str() module_guid = file_guids[-1].replace(\"file-\", \"\") module_name =", "= self.get_unique_name(module_name[:-1]) self.modules.append(module_name) return module_name # no UI section, try to get a", "= f.read() module_name = module_name.decode(\"utf-16le\") module_name = self.get_unique_name(module_name[:-1]) self.modules.append(module_name) return module_name # no", "str() module_guid = file_guids[-1].replace(\"file-\", \"\") module_name = UEFI_GUIDS.get(module_guid.upper()) if not module_name: module_name =", "= os.path.join(dir_name, \"*.ui\") if len(glob.glob(template)) == 1: # try to get a friendly", "= 1 unique_name = module_name while True: if unique_name in self.modules: unique_name =", "<filename>tools/get_efi_images.py<gh_stars>100-1000 # SPDX-License-Identifier: MIT import glob import os import pathlib import re import", "- 40 :]) if parser.type() is \"unknown\": return self._unsupported() firmware = parser.parse() firmware.dump(self.dir_name)", "@staticmethod def search_te(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.te\"))) def get_pe_files(self): pe_files =", "-> bool: \"\"\"get images from firmware\"\"\" colorama.init(autoreset=True) # for correct color display in", "= file_content.find(b\"_FVH\") if fvh_index < 0: return self._unsupported() parser = uefi_firmware.AutoParser(file_content[fvh_index - 40", "re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" ) class Dumper: def __init__(self, fw_name, dir_name, pe_dir): self.fw_name = fw_name", "print(f\"[-] Check {self.fw_name} file\") return False with open(self.fw_name, \"rb\") as fw: file_content =", "# SPDX-License-Identifier: MIT import glob import os import pathlib import 
re import shutil", "in uefi_firmware module dumper = Dumper(fw_name, DIR_NAME, PE_DIR) if not dumper.dump_all(): exit() dumper.get_pe_files()", "DIR_NAME = \"all\" PE_DIR = \"modules\" g_re_guid = re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" ) class Dumper:", "ui_path = glob.glob(template)[0] with open(ui_path, \"rb\") as f: module_name = f.read() module_name =", "return self._unsupported() firmware = parser.parse() firmware.dump(self.dir_name) return True def get_efi_images(fw_name) -> bool: \"\"\"get", "-> str: module_name = str() dir_name, _ = os.path.split(module_path) template = os.path.join(dir_name, \"*.ui\")", "= pe_dir self.modules = list() if not os.path.isdir(self.dir_name): os.mkdir(self.dir_name) if not os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir)", "module_name: str) -> str: # Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11 index = 1", "unique_name def get_module_name(self, module_path: str) -> str: module_name = str() dir_name, _ =", "print(f\"Current module: unknown\") continue print(f\"Current module: {module_name}\") dst = os.path.join(self.pe_dir, module_name) shutil.copy(module_path, dst)", "= UEFI_GUIDS.get(module_guid.upper()) if not module_name: module_name = module_guid module_name = self.get_unique_name(module_name) self.modules.append(module_name) return", "parser = uefi_firmware.AutoParser(file_content[fvh_index - 40 :]) if parser.type() is \"unknown\": return self._unsupported() firmware", "from firmware\"\"\" colorama.init(autoreset=True) # for correct color display in uefi_firmware module dumper =", "self.search_pe(self.dir_name) te_files = self.search_te(self.dir_name) for module_path in te_files + pe_files: module_name = self.get_module_name(module_path)", "self.search_te(self.dir_name) for module_path in te_files + pe_files: module_name = self.get_module_name(module_path) if not module_name:", "_unsupported() -> bool: print(\"[-] This type of binary is not supported\") return False", "if not os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir) @staticmethod def _unsupported() -> bool: print(\"[-] This type of", "os.mkdir(self.pe_dir) @staticmethod def _unsupported() -> bool: print(\"[-] This type of binary is not", "self.modules.append(module_name) return module_name # no UI section, try to get a friendly name", "bool: print(\"[-] This type of binary is not supported\") return False def get_unique_name(self,", "search_te(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.te\"))) def get_pe_files(self): pe_files = self.search_pe(self.dir_name) te_files", "uefi_firmware module dumper = Dumper(fw_name, DIR_NAME, PE_DIR) if not dumper.dump_all(): exit() dumper.get_pe_files() return", "module_name = module_name.decode(\"utf-16le\") module_name = self.get_unique_name(module_name[:-1]) self.modules.append(module_name) return module_name # no UI section,", "continue return unique_name def get_module_name(self, module_path: str) -> str: module_name = str() dir_name,", "import os import pathlib import re import shutil import colorama import uefi_firmware from", "to get a friendly name from the *.ui file ui_path = glob.glob(template)[0] with", "module_name = self.get_module_name(module_path) if not module_name: print(f\"Current module: unknown\") continue print(f\"Current module: {module_name}\")", "if not os.path.isdir(self.dir_name): os.mkdir(self.dir_name) if not os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir) @staticmethod def _unsupported() -> bool:", "= module_name while True: if 
unique_name in self.modules: unique_name = f\"{module_name}_{index:#d}\" index +=", "-> list: return list(map(str, pathlib.Path(d).rglob(\"*.te\"))) def get_pe_files(self): pe_files = self.search_pe(self.dir_name) te_files = self.search_te(self.dir_name)", "UEFI_GUIDS DIR_NAME = \"all\" PE_DIR = \"modules\" g_re_guid = re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" ) class", "f.read() module_name = module_name.decode(\"utf-16le\") module_name = self.get_unique_name(module_name[:-1]) self.modules.append(module_name) return module_name # no UI", "str: # Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11 index = 1 unique_name = module_name", "def search_pe(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod def search_te(d: str) ->", "friendly name from the GUID database file_guids = g_re_guid.findall(dir_name) if not file_guids: return", "not module_name: print(f\"Current module: unknown\") continue print(f\"Current module: {module_name}\") dst = os.path.join(self.pe_dir, module_name)", "name from the GUID database file_guids = g_re_guid.findall(dir_name) if not file_guids: return str()", "+ pe_files: module_name = self.get_module_name(module_path) if not module_name: print(f\"Current module: unknown\") continue print(f\"Current", "file ui_path = glob.glob(template)[0] with open(ui_path, \"rb\") as f: module_name = f.read() module_name", "if not file_guids: return str() module_guid = file_guids[-1].replace(\"file-\", \"\") module_name = UEFI_GUIDS.get(module_guid.upper()) if", "module_name = UEFI_GUIDS.get(module_guid.upper()) if not module_name: module_name = module_guid module_name = self.get_unique_name(module_name) self.modules.append(module_name)", "self.fw_name = fw_name self.dir_name = dir_name self.pe_dir = pe_dir self.modules = list() if", "shutil import colorama import uefi_firmware from .guid_db import UEFI_GUIDS DIR_NAME = \"all\" PE_DIR", "database file_guids = g_re_guid.findall(dir_name) if not file_guids: return str() module_guid = file_guids[-1].replace(\"file-\", \"\")", "= os.path.join(self.pe_dir, module_name) shutil.copy(module_path, dst) def dump_all(self) -> bool: if not os.path.isfile(self.fw_name): print(f\"[-]", "def dump_all(self) -> bool: if not os.path.isfile(self.fw_name): print(f\"[-] Check {self.fw_name} file\") return False", "open(ui_path, \"rb\") as f: module_name = f.read() module_name = module_name.decode(\"utf-16le\") module_name = self.get_unique_name(module_name[:-1])", "os.mkdir(self.dir_name) if not os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir) @staticmethod def _unsupported() -> bool: print(\"[-] This type", "is \"unknown\": return self._unsupported() firmware = parser.parse() firmware.dump(self.dir_name) return True def get_efi_images(fw_name) ->", "os.path.isdir(self.dir_name): os.mkdir(self.dir_name) if not os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir) @staticmethod def _unsupported() -> bool: print(\"[-] This", "dir_name self.pe_dir = pe_dir self.modules = list() if not os.path.isdir(self.dir_name): os.mkdir(self.dir_name) if not", "import pathlib import re import shutil import colorama import uefi_firmware from .guid_db import", "_ = os.path.split(module_path) template = os.path.join(dir_name, \"*.ui\") if len(glob.glob(template)) == 1: # try", "try to get a friendly name from the GUID database file_guids = g_re_guid.findall(dir_name)", "= \"modules\" g_re_guid = re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" ) 
class Dumper: def __init__(self, fw_name, dir_name,", "parser.parse() firmware.dump(self.dir_name) return True def get_efi_images(fw_name) -> bool: \"\"\"get images from firmware\"\"\" colorama.init(autoreset=True)", "def _unsupported() -> bool: print(\"[-] This type of binary is not supported\") return", "return module_name @staticmethod def search_pe(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod def", "\"\") module_name = UEFI_GUIDS.get(module_guid.upper()) if not module_name: module_name = module_guid module_name = self.get_unique_name(module_name)", "PE_DIR = \"modules\" g_re_guid = re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" ) class Dumper: def __init__(self, fw_name,", "dst) def dump_all(self) -> bool: if not os.path.isfile(self.fw_name): print(f\"[-] Check {self.fw_name} file\") return", "module_guid = file_guids[-1].replace(\"file-\", \"\") module_name = UEFI_GUIDS.get(module_guid.upper()) if not module_name: module_name = module_guid", "= f\"{module_name}_{index:#d}\" index += 1 continue return unique_name def get_module_name(self, module_path: str) ->", "pe_files = self.search_pe(self.dir_name) te_files = self.search_te(self.dir_name) for module_path in te_files + pe_files: module_name", "unique_name in self.modules: unique_name = f\"{module_name}_{index:#d}\" index += 1 continue return unique_name def", "len(glob.glob(template)) == 1: # try to get a friendly name from the *.ui", "os.path.join(dir_name, \"*.ui\") if len(glob.glob(template)) == 1: # try to get a friendly name", "Dumper: def __init__(self, fw_name, dir_name, pe_dir): self.fw_name = fw_name self.dir_name = dir_name self.pe_dir", "the GUID database file_guids = g_re_guid.findall(dir_name) if not file_guids: return str() module_guid =", "get_efi_images(fw_name) -> bool: \"\"\"get images from firmware\"\"\" colorama.init(autoreset=True) # for correct color display", "@staticmethod def _unsupported() -> bool: print(\"[-] This type of binary is not supported\")", "module_path: str) -> str: module_name = str() dir_name, _ = os.path.split(module_path) template =", "\"rb\") as f: module_name = f.read() module_name = module_name.decode(\"utf-16le\") module_name = self.get_unique_name(module_name[:-1]) self.modules.append(module_name)", "list(map(str, pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod def search_te(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.te\"))) def get_pe_files(self):", "= fw_name self.dir_name = dir_name self.pe_dir = pe_dir self.modules = list() if not", "-> list: return list(map(str, pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod def search_te(d: str) -> list: return list(map(str,", "True: if unique_name in self.modules: unique_name = f\"{module_name}_{index:#d}\" index += 1 continue return", "1 continue return unique_name def get_module_name(self, module_path: str) -> str: module_name = str()", "str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.te\"))) def get_pe_files(self): pe_files = self.search_pe(self.dir_name) te_files =", "not os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir) @staticmethod def _unsupported() -> bool: print(\"[-] This type of binary", "= glob.glob(template)[0] with open(ui_path, \"rb\") as f: module_name = f.read() module_name = module_name.decode(\"utf-16le\")", "= self.get_module_name(module_path) if not module_name: print(f\"Current module: unknown\") continue print(f\"Current module: {module_name}\") dst", "module_name while True: if unique_name in self.modules: 
unique_name = f\"{module_name}_{index:#d}\" index += 1", "\"\"\"get images from firmware\"\"\" colorama.init(autoreset=True) # for correct color display in uefi_firmware module", "re import shutil import colorama import uefi_firmware from .guid_db import UEFI_GUIDS DIR_NAME =", "get a friendly name from the *.ui file ui_path = glob.glob(template)[0] with open(ui_path,", "def get_unique_name(self, module_name: str) -> str: # Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11 index", "file_guids = g_re_guid.findall(dir_name) if not file_guids: return str() module_guid = file_guids[-1].replace(\"file-\", \"\") module_name", "SPDX-License-Identifier: MIT import glob import os import pathlib import re import shutil import", "g_re_guid.findall(dir_name) if not file_guids: return str() module_guid = file_guids[-1].replace(\"file-\", \"\") module_name = UEFI_GUIDS.get(module_guid.upper())", "g_re_guid = re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" ) class Dumper: def __init__(self, fw_name, dir_name, pe_dir): self.fw_name", "as f: module_name = f.read() module_name = module_name.decode(\"utf-16le\") module_name = self.get_unique_name(module_name[:-1]) self.modules.append(module_name) return", "fvh_index = file_content.find(b\"_FVH\") if fvh_index < 0: return self._unsupported() parser = uefi_firmware.AutoParser(file_content[fvh_index -", "40 :]) if parser.type() is \"unknown\": return self._unsupported() firmware = parser.parse() firmware.dump(self.dir_name) return", "is \"unknown\": fvh_index = file_content.find(b\"_FVH\") if fvh_index < 0: return self._unsupported() parser =", "return list(map(str, pathlib.Path(d).rglob(\"*.te\"))) def get_pe_files(self): pe_files = self.search_pe(self.dir_name) te_files = self.search_te(self.dir_name) for module_path", "self.get_unique_name(module_name) self.modules.append(module_name) return module_name @staticmethod def search_pe(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.pe\")))", "if not module_name: print(f\"Current module: unknown\") continue print(f\"Current module: {module_name}\") dst = os.path.join(self.pe_dir,", "type of binary is not supported\") return False def get_unique_name(self, module_name: str) ->", "module_name = self.get_unique_name(module_name[:-1]) self.modules.append(module_name) return module_name # no UI section, try to get", "shutil.copy(module_path, dst) def dump_all(self) -> bool: if not os.path.isfile(self.fw_name): print(f\"[-] Check {self.fw_name} file\")", "return str() module_guid = file_guids[-1].replace(\"file-\", \"\") module_name = UEFI_GUIDS.get(module_guid.upper()) if not module_name: module_name", "te_files = self.search_te(self.dir_name) for module_path in te_files + pe_files: module_name = self.get_module_name(module_path) if", "def get_pe_files(self): pe_files = self.search_pe(self.dir_name) te_files = self.search_te(self.dir_name) for module_path in te_files +", "import re import shutil import colorama import uefi_firmware from .guid_db import UEFI_GUIDS DIR_NAME", "if fvh_index < 0: return self._unsupported() parser = uefi_firmware.AutoParser(file_content[fvh_index - 40 :]) if", "+= 1 continue return unique_name def get_module_name(self, module_path: str) -> str: module_name =", "parser.type() is \"unknown\": return self._unsupported() firmware = parser.parse() firmware.dump(self.dir_name) return True def get_efi_images(fw_name)", "friendly name from the *.ui file ui_path = glob.glob(template)[0] with open(ui_path, \"rb\") as", 
"firmware\"\"\" colorama.init(autoreset=True) # for correct color display in uefi_firmware module dumper = Dumper(fw_name,", "return False def get_unique_name(self, module_name: str) -> str: # Get unique name, see", "a friendly name from the *.ui file ui_path = glob.glob(template)[0] with open(ui_path, \"rb\")", "pathlib.Path(d).rglob(\"*.te\"))) def get_pe_files(self): pe_files = self.search_pe(self.dir_name) te_files = self.search_te(self.dir_name) for module_path in te_files", "\"*.ui\") if len(glob.glob(template)) == 1: # try to get a friendly name from", "\"all\" PE_DIR = \"modules\" g_re_guid = re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" ) class Dumper: def __init__(self,", "os.path.split(module_path) template = os.path.join(dir_name, \"*.ui\") if len(glob.glob(template)) == 1: # try to get", "while True: if unique_name in self.modules: unique_name = f\"{module_name}_{index:#d}\" index += 1 continue", "import glob import os import pathlib import re import shutil import colorama import", "GUID database file_guids = g_re_guid.findall(dir_name) if not file_guids: return str() module_guid = file_guids[-1].replace(\"file-\",", "= self.search_te(self.dir_name) for module_path in te_files + pe_files: module_name = self.get_module_name(module_path) if not", "pe_dir self.modules = list() if not os.path.isdir(self.dir_name): os.mkdir(self.dir_name) if not os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir) @staticmethod", "pe_dir): self.fw_name = fw_name self.dir_name = dir_name self.pe_dir = pe_dir self.modules = list()", "from the *.ui file ui_path = glob.glob(template)[0] with open(ui_path, \"rb\") as f: module_name", "if not os.path.isfile(self.fw_name): print(f\"[-] Check {self.fw_name} file\") return False with open(self.fw_name, \"rb\") as", "template = os.path.join(dir_name, \"*.ui\") if len(glob.glob(template)) == 1: # try to get a", "fw.read() parser = uefi_firmware.AutoParser(file_content) if parser.type() is \"unknown\": fvh_index = file_content.find(b\"_FVH\") if fvh_index", "get_unique_name(self, module_name: str) -> str: # Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11 index =", "def get_module_name(self, module_path: str) -> str: module_name = str() dir_name, _ = os.path.split(module_path)", "module_name: module_name = module_guid module_name = self.get_unique_name(module_name) self.modules.append(module_name) return module_name @staticmethod def search_pe(d:", "return self._unsupported() parser = uefi_firmware.AutoParser(file_content[fvh_index - 40 :]) if parser.type() is \"unknown\": return", "def get_efi_images(fw_name) -> bool: \"\"\"get images from firmware\"\"\" colorama.init(autoreset=True) # for correct color", "unique_name = f\"{module_name}_{index:#d}\" index += 1 continue return unique_name def get_module_name(self, module_path: str)", "to get a friendly name from the GUID database file_guids = g_re_guid.findall(dir_name) if", "unique name, see https://github.com/binarly-io/efiXplorer/issues/11 index = 1 unique_name = module_name while True: if", "self.get_unique_name(module_name[:-1]) self.modules.append(module_name) return module_name # no UI section, try to get a friendly", "if not module_name: module_name = module_guid module_name = self.get_unique_name(module_name) self.modules.append(module_name) return module_name @staticmethod", "if len(glob.glob(template)) == 1: # try to get a friendly name from the", "get_pe_files(self): pe_files = self.search_pe(self.dir_name) te_files = 
self.search_te(self.dir_name) for module_path in te_files + pe_files:", "file_content.find(b\"_FVH\") if fvh_index < 0: return self._unsupported() parser = uefi_firmware.AutoParser(file_content[fvh_index - 40 :])", "= module_name.decode(\"utf-16le\") module_name = self.get_unique_name(module_name[:-1]) self.modules.append(module_name) return module_name # no UI section, try", "module_guid module_name = self.get_unique_name(module_name) self.modules.append(module_name) return module_name @staticmethod def search_pe(d: str) -> list:", "as fw: file_content = fw.read() parser = uefi_firmware.AutoParser(file_content) if parser.type() is \"unknown\": fvh_index", "bool: if not os.path.isfile(self.fw_name): print(f\"[-] Check {self.fw_name} file\") return False with open(self.fw_name, \"rb\")", "1 unique_name = module_name while True: if unique_name in self.modules: unique_name = f\"{module_name}_{index:#d}\"", "unknown\") continue print(f\"Current module: {module_name}\") dst = os.path.join(self.pe_dir, module_name) shutil.copy(module_path, dst) def dump_all(self)", "module_name # no UI section, try to get a friendly name from the", "index = 1 unique_name = module_name while True: if unique_name in self.modules: unique_name", "str) -> str: # Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11 index = 1 unique_name", "module dumper = Dumper(fw_name, DIR_NAME, PE_DIR) if not dumper.dump_all(): exit() dumper.get_pe_files() return True", "{module_name}\") dst = os.path.join(self.pe_dir, module_name) shutil.copy(module_path, dst) def dump_all(self) -> bool: if not", "os.path.join(self.pe_dir, module_name) shutil.copy(module_path, dst) def dump_all(self) -> bool: if not os.path.isfile(self.fw_name): print(f\"[-] Check", "the *.ui file ui_path = glob.glob(template)[0] with open(ui_path, \"rb\") as f: module_name =", "self.get_module_name(module_path) if not module_name: print(f\"Current module: unknown\") continue print(f\"Current module: {module_name}\") dst =", "\"rb\") as fw: file_content = fw.read() parser = uefi_firmware.AutoParser(file_content) if parser.type() is \"unknown\":", "glob import os import pathlib import re import shutil import colorama import uefi_firmware", "for module_path in te_files + pe_files: module_name = self.get_module_name(module_path) if not module_name: print(f\"Current", ") class Dumper: def __init__(self, fw_name, dir_name, pe_dir): self.fw_name = fw_name self.dir_name =", "os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir) @staticmethod def _unsupported() -> bool: print(\"[-] This type of binary is", "import shutil import colorama import uefi_firmware from .guid_db import UEFI_GUIDS DIR_NAME = \"all\"", "= uefi_firmware.AutoParser(file_content) if parser.type() is \"unknown\": fvh_index = file_content.find(b\"_FVH\") if fvh_index < 0:", "of binary is not supported\") return False def get_unique_name(self, module_name: str) -> str:", "self.dir_name = dir_name self.pe_dir = pe_dir self.modules = list() if not os.path.isdir(self.dir_name): os.mkdir(self.dir_name)", "a friendly name from the GUID database file_guids = g_re_guid.findall(dir_name) if not file_guids:", "os import pathlib import re import shutil import colorama import uefi_firmware from .guid_db", "get a friendly name from the GUID database file_guids = g_re_guid.findall(dir_name) if not", "pe_files: module_name = self.get_module_name(module_path) if not module_name: print(f\"Current module: unknown\") continue print(f\"Current module:", "\"unknown\": fvh_index = file_content.find(b\"_FVH\") 
if fvh_index < 0: return self._unsupported() parser = uefi_firmware.AutoParser(file_content[fvh_index", "module_name = module_guid module_name = self.get_unique_name(module_name) self.modules.append(module_name) return module_name @staticmethod def search_pe(d: str)", "name, see https://github.com/binarly-io/efiXplorer/issues/11 index = 1 unique_name = module_name while True: if unique_name", "module_name @staticmethod def search_pe(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod def search_te(d:", "str() dir_name, _ = os.path.split(module_path) template = os.path.join(dir_name, \"*.ui\") if len(glob.glob(template)) == 1:", "if parser.type() is \"unknown\": return self._unsupported() firmware = parser.parse() firmware.dump(self.dir_name) return True def", "print(\"[-] This type of binary is not supported\") return False def get_unique_name(self, module_name:", "name from the *.ui file ui_path = glob.glob(template)[0] with open(ui_path, \"rb\") as f:", "correct color display in uefi_firmware module dumper = Dumper(fw_name, DIR_NAME, PE_DIR) if not", "-> bool: if not os.path.isfile(self.fw_name): print(f\"[-] Check {self.fw_name} file\") return False with open(self.fw_name,", "with open(ui_path, \"rb\") as f: module_name = f.read() module_name = module_name.decode(\"utf-16le\") module_name =", "in te_files + pe_files: module_name = self.get_module_name(module_path) if not module_name: print(f\"Current module: unknown\")", "= module_guid module_name = self.get_unique_name(module_name) self.modules.append(module_name) return module_name @staticmethod def search_pe(d: str) ->", "= \"all\" PE_DIR = \"modules\" g_re_guid = re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" ) class Dumper: def", "fw_name self.dir_name = dir_name self.pe_dir = pe_dir self.modules = list() if not os.path.isdir(self.dir_name):", "in self.modules: unique_name = f\"{module_name}_{index:#d}\" index += 1 continue return unique_name def get_module_name(self,", "https://github.com/binarly-io/efiXplorer/issues/11 index = 1 unique_name = module_name while True: if unique_name in self.modules:", "= self.search_pe(self.dir_name) te_files = self.search_te(self.dir_name) for module_path in te_files + pe_files: module_name =", "not os.path.isfile(self.fw_name): print(f\"[-] Check {self.fw_name} file\") return False with open(self.fw_name, \"rb\") as fw:", "self._unsupported() parser = uefi_firmware.AutoParser(file_content[fvh_index - 40 :]) if parser.type() is \"unknown\": return self._unsupported()", "= fw.read() parser = uefi_firmware.AutoParser(file_content) if parser.type() is \"unknown\": fvh_index = file_content.find(b\"_FVH\") if", "\"modules\" g_re_guid = re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\" ) class Dumper: def __init__(self, fw_name, dir_name, pe_dir):", "fvh_index < 0: return self._unsupported() parser = uefi_firmware.AutoParser(file_content[fvh_index - 40 :]) if parser.type()", "= list() if not os.path.isdir(self.dir_name): os.mkdir(self.dir_name) if not os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir) @staticmethod def _unsupported()", "MIT import glob import os import pathlib import re import shutil import colorama", "uefi_firmware from .guid_db import UEFI_GUIDS DIR_NAME = \"all\" PE_DIR = \"modules\" g_re_guid =", "colorama import uefi_firmware from .guid_db import UEFI_GUIDS DIR_NAME = \"all\" PE_DIR = \"modules\"", "open(self.fw_name, \"rb\") as fw: file_content = fw.read() parser = 
uefi_firmware.AutoParser(file_content) if parser.type() is", "pathlib import re import shutil import colorama import uefi_firmware from .guid_db import UEFI_GUIDS", "import uefi_firmware from .guid_db import UEFI_GUIDS DIR_NAME = \"all\" PE_DIR = \"modules\" g_re_guid", "True def get_efi_images(fw_name) -> bool: \"\"\"get images from firmware\"\"\" colorama.init(autoreset=True) # for correct", "dir_name, _ = os.path.split(module_path) template = os.path.join(dir_name, \"*.ui\") if len(glob.glob(template)) == 1: #", "import colorama import uefi_firmware from .guid_db import UEFI_GUIDS DIR_NAME = \"all\" PE_DIR =", "__init__(self, fw_name, dir_name, pe_dir): self.fw_name = fw_name self.dir_name = dir_name self.pe_dir = pe_dir", "= file_guids[-1].replace(\"file-\", \"\") module_name = UEFI_GUIDS.get(module_guid.upper()) if not module_name: module_name = module_guid module_name", "bool: \"\"\"get images from firmware\"\"\" colorama.init(autoreset=True) # for correct color display in uefi_firmware", "# try to get a friendly name from the *.ui file ui_path =", "search_pe(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod def search_te(d: str) -> list:", "list(map(str, pathlib.Path(d).rglob(\"*.te\"))) def get_pe_files(self): pe_files = self.search_pe(self.dir_name) te_files = self.search_te(self.dir_name) for module_path in", "if parser.type() is \"unknown\": fvh_index = file_content.find(b\"_FVH\") if fvh_index < 0: return self._unsupported()", "list: return list(map(str, pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod def search_te(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.te\")))", "list: return list(map(str, pathlib.Path(d).rglob(\"*.te\"))) def get_pe_files(self): pe_files = self.search_pe(self.dir_name) te_files = self.search_te(self.dir_name) for", "module_name = str() dir_name, _ = os.path.split(module_path) template = os.path.join(dir_name, \"*.ui\") if len(glob.glob(template))", "binary is not supported\") return False def get_unique_name(self, module_name: str) -> str: #", "not os.path.isdir(self.dir_name): os.mkdir(self.dir_name) if not os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir) @staticmethod def _unsupported() -> bool: print(\"[-]", "firmware = parser.parse() firmware.dump(self.dir_name) return True def get_efi_images(fw_name) -> bool: \"\"\"get images from", "file_guids[-1].replace(\"file-\", \"\") module_name = UEFI_GUIDS.get(module_guid.upper()) if not module_name: module_name = module_guid module_name =", "uefi_firmware.AutoParser(file_content[fvh_index - 40 :]) if parser.type() is \"unknown\": return self._unsupported() firmware = parser.parse()", "self.modules = list() if not os.path.isdir(self.dir_name): os.mkdir(self.dir_name) if not os.path.isdir(self.pe_dir): os.mkdir(self.pe_dir) @staticmethod def", "dump_all(self) -> bool: if not os.path.isfile(self.fw_name): print(f\"[-] Check {self.fw_name} file\") return False with", "supported\") return False def get_unique_name(self, module_name: str) -> str: # Get unique name,", "firmware.dump(self.dir_name) return True def get_efi_images(fw_name) -> bool: \"\"\"get images from firmware\"\"\" colorama.init(autoreset=True) #", "This type of binary is not supported\") return False def get_unique_name(self, module_name: str)", "str) -> str: module_name = str() dir_name, _ = os.path.split(module_path) template = os.path.join(dir_name,", "module: unknown\") continue print(f\"Current module: {module_name}\") dst = os.path.join(self.pe_dir, module_name) 
shutil.copy(module_path, dst) def", "< 0: return self._unsupported() parser = uefi_firmware.AutoParser(file_content[fvh_index - 40 :]) if parser.type() is", "UEFI_GUIDS.get(module_guid.upper()) if not module_name: module_name = module_guid module_name = self.get_unique_name(module_name) self.modules.append(module_name) return module_name", "parser.type() is \"unknown\": fvh_index = file_content.find(b\"_FVH\") if fvh_index < 0: return self._unsupported() parser", "UI section, try to get a friendly name from the GUID database file_guids", "module_name: print(f\"Current module: unknown\") continue print(f\"Current module: {module_name}\") dst = os.path.join(self.pe_dir, module_name) shutil.copy(module_path,", "# no UI section, try to get a friendly name from the GUID", "= os.path.split(module_path) template = os.path.join(dir_name, \"*.ui\") if len(glob.glob(template)) == 1: # try to", "module_name = self.get_unique_name(module_name) self.modules.append(module_name) return module_name @staticmethod def search_pe(d: str) -> list: return", "class Dumper: def __init__(self, fw_name, dir_name, pe_dir): self.fw_name = fw_name self.dir_name = dir_name", "file_guids: return str() module_guid = file_guids[-1].replace(\"file-\", \"\") module_name = UEFI_GUIDS.get(module_guid.upper()) if not module_name:", "dst = os.path.join(self.pe_dir, module_name) shutil.copy(module_path, dst) def dump_all(self) -> bool: if not os.path.isfile(self.fw_name):", "not file_guids: return str() module_guid = file_guids[-1].replace(\"file-\", \"\") module_name = UEFI_GUIDS.get(module_guid.upper()) if not", "file\") return False with open(self.fw_name, \"rb\") as fw: file_content = fw.read() parser =", "False with open(self.fw_name, \"rb\") as fw: file_content = fw.read() parser = uefi_firmware.AutoParser(file_content) if", "0: return self._unsupported() parser = uefi_firmware.AutoParser(file_content[fvh_index - 40 :]) if parser.type() is \"unknown\":", "str: module_name = str() dir_name, _ = os.path.split(module_path) template = os.path.join(dir_name, \"*.ui\") if", "return list(map(str, pathlib.Path(d).rglob(\"*.pe\"))) @staticmethod def search_te(d: str) -> list: return list(map(str, pathlib.Path(d).rglob(\"*.te\"))) def", "not supported\") return False def get_unique_name(self, module_name: str) -> str: # Get unique", ".guid_db import UEFI_GUIDS DIR_NAME = \"all\" PE_DIR = \"modules\" g_re_guid = re.compile( r\"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\"", "from the GUID database file_guids = g_re_guid.findall(dir_name) if not file_guids: return str() module_guid" ]
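A rough usage sketch of the dumper above. The firmware path and the import path are illustrative assumptions (the module uses a relative import of .guid_db, so it lives inside a package); everything else mirrors what get_efi_images already does with DIR_NAME and PE_DIR:

# Hypothetical driver; "firmware.bin" and the import path are assumptions.
from tools.get_efi_images import Dumper, DIR_NAME, PE_DIR

dumper = Dumper("firmware.bin", DIR_NAME, PE_DIR)
if dumper.dump_all():      # unpacks the UEFI volume tree into ./all
    dumper.get_pe_files()  # copies each PE/TE module into ./modules under a friendly name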
[ "x_ci=None, fit_reg=False, color=\"red\", scatter_kws={\"s\": 1}, ax=ax) yticks = [10**i for i in range(10)", "is a summary file generated by albacore.\", nargs='+', metavar=\"files\", required=True) target.add_argument(\"--bam\", help=\"Data as", "+ 1), x_ci=None, fit_reg=False, color=\"red\", scatter_kws={\"s\": 1}, ax=ax) yticks = [10**i for i", "= parser.add_argument_group( title='General options') general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show the help and exit\") general.add_argument(\"-v\",", "for the output files.\", default=\"\", type=str) general.add_argument(\"--verbose\", help=\"Write log messages also to terminal.\",", "default=\"png\", type=str, choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf', 'png', 'ps', 'raw', 'rgba', 'svg', 'svgz',", "reads', title=title or cum_yield_reads.title) fig = ax.get_figure() cum_yield_reads.fig = fig fig.savefig(cum_yield_reads.path, format=figformat, dpi=100,", "default=8) target = parser.add_argument_group( title=\"Input data sources, requires a bam and a summary", "cum_yield_reads = Plot( path=path + \"CumulativeYieldPlot_NumberOfReads.\" + figformat, title=\"Cumulative yield\") ax = sns.regplot(", "to all plots, requires quoting if using spaces\", type=str, default=None) visual.add_argument(\"--hours\", help=\"How many", "action=\"version\", version='NanoComp {}'.format(__version__)) general.add_argument(\"-t\", \"--threads\", help=\"Set the allowed number of threads to be", "help=\"Specify directory in which output has to be created.\", default=\".\") general.add_argument(\"-p\", \"--prefix\", help=\"Specify", "+ \"CumulativeYieldPlot_NumberOfReads.\" + figformat, title=\"Cumulative yield\") ax = sns.regplot( x=dfs['start_time'], y=np.log10(dfs['index'] + 1),", "> 10 * dfs[\"index\"].max()] ax.set( xlabel='Run time (minutes)', yticks=np.log10(yticks), yticklabels=yticks, ylabel='Cumulative yield in", "many hours to plot in the graph\", type=int, default=8) target = parser.add_argument_group( title=\"Input", "seaborn as sns import matplotlib.pyplot as plt import numpy as np def main():", "path, figformat=\"png\", title=None, hours=8): dfs = check_valid_time_and_sort( df=df, timescol=\"start_time\", days=hours / 24, warning=False)", "dtype float64 cum_yield_reads = Plot( path=path + \"CumulativeYieldPlot_NumberOfReads.\" + figformat, title=\"Cumulative yield\") ax", "which output has to be created.\", default=\".\") general.add_argument(\"-p\", \"--prefix\", help=\"Specify an optional prefix", ".reset_index() ax = sns.regplot( x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"] + 1), x_ci=None, fit_reg=False, color=\"red\", scatter_kws={\"s\": 1},", "dfs = check_valid_time_and_sort( df=df, timescol=\"start_time\", days=hours / 24, warning=False) dfs[\"start_time\"] = dfs[\"start_time\"].astype('timedelta64[m]') #", "title=title or cum_yield_reads.title) fig = ax.get_figure() cum_yield_reads.fig = fig fig.savefig(cum_yield_reads.path, format=figformat, dpi=100, bbox_inches=\"tight\")", "generated by albacore.\", nargs='+', metavar=\"files\", required=True) target.add_argument(\"--bam\", help=\"Data as a sorted bam file.\",", "df=df, timescol=\"start_time\", days=hours / 24, warning=False) dfs[\"start_time\"] = dfs[\"start_time\"].astype('timedelta64[m]') # ?! 
dtype float64", "plots.\", default=\"png\", type=str, choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf', 'png', 'ps', 'raw', 'rgba', 'svg',", "ax.get_figure() cum_yield_reads.fig = fig fig.savefig(cum_yield_reads.path, format=figformat, dpi=100, bbox_inches=\"tight\") plt.close(\"all\") if __name__ == '__main__':", "plots, requires quoting if using spaces\", type=str, default=None) visual.add_argument(\"--hours\", help=\"How many hours to", "the output format of the plots.\", default=\"png\", type=str, choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf',", "directory in which output has to be created.\", default=\".\") general.add_argument(\"-p\", \"--prefix\", help=\"Specify an", "quoting if using spaces\", type=str, default=None) visual.add_argument(\"--hours\", help=\"How many hours to plot in", "= parser.add_argument_group( title=\"Input data sources, requires a bam and a summary file.\") target.add_argument(\"--summary\",", "axis=1) \\ .dropna(axis=\"index\", how=\"any\") \\ .reset_index(drop=True) \\ .reset_index() ax = sns.regplot( x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"]", "nargs='+', metavar=\"files\", required=True) target.add_argument(\"--bam\", help=\"Data as a sorted bam file.\", nargs='+', metavar=\"files\", required=True)", "'png', 'ps', 'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff']) visual.add_argument(\"--title\", help=\"Add a title to", "sep=\"\\t\", columns=[\"start_time\"], compression='gzip') def get_args(): epilog = \"\"\"\"\"\" parser = ArgumentParser( description=\"Get detection", "help=\"Set the allowed number of threads to be used by the script\", default=4,", "help=\"Specify the output format of the plots.\", default=\"png\", type=str, choices=['eps', 'jpeg', 'jpg', 'pdf',", "nanoplotter import check_valid_time_and_sort, Plot from os import path import seaborn as sns import", "1}, ax=ax) yticks = [10**i for i in range(10) if not 10**i >", "= get_args() merged_df = get_input(source=\"summary\", files=args.summary).set_index(\"readIDs\") \\ .merge(right=get_input(source=\"bam\", files=args.bam).set_index(\"readIDs\"), how=\"left\", left_index=True, right_index=True) plot_retrotect(df=merged_df,", "using spaces\", type=str, default=None) visual.add_argument(\"--hours\", help=\"How many hours to plot in the graph\",", "dfs.drop('index', axis=1) \\ .dropna(axis=\"index\", how=\"any\") \\ .reset_index(drop=True) \\ .reset_index() ax = sns.regplot( x=aligned_df['start_time'],", "of nanopore experiment.\", epilog=epilog, formatter_class=utils.custom_formatter, add_help=False) general = parser.add_argument_group( title='General options') general.add_argument(\"-h\", \"--help\",", "get_input(source=\"summary\", files=args.summary).set_index(\"readIDs\") \\ .merge(right=get_input(source=\"bam\", files=args.bam).set_index(\"readIDs\"), how=\"left\", left_index=True, right_index=True) plot_retrotect(df=merged_df, path=path.join(args.outdir, args.prefix), figformat=args.format, title=args.title,", "number of threads to be used by the script\", default=4, type=int) general.add_argument(\"-o\", \"--outdir\",", "'svg', 'svgz', 'tif', 'tiff']) visual.add_argument(\"--title\", help=\"Add a title to all plots, requires quoting", "and exit.\", action=\"version\", version='NanoComp {}'.format(__version__)) general.add_argument(\"-t\", \"--threads\", help=\"Set the allowed number of threads", "import ArgumentParser from nanoplot import utils from .version import __version__ from nanoplotter import", "np def main(): args = get_args() merged_df = get_input(source=\"summary\", 
files=args.summary).set_index(\"readIDs\") \\ .merge(right=get_input(source=\"bam\", files=args.bam).set_index(\"readIDs\"),", "description=\"Get detection curve of nanopore experiment.\", epilog=epilog, formatter_class=utils.custom_formatter, add_help=False) general = parser.add_argument_group( title='General", "default=\".\") general.add_argument(\"-p\", \"--prefix\", help=\"Specify an optional prefix to be used for the output", "ax.set( xlabel='Run time (minutes)', yticks=np.log10(yticks), yticklabels=yticks, ylabel='Cumulative yield in log transformed number of", "check_valid_time_and_sort( df=df, timescol=\"start_time\", days=hours / 24, warning=False) dfs[\"start_time\"] = dfs[\"start_time\"].astype('timedelta64[m]') # ?! dtype", "main(): args = get_args() merged_df = get_input(source=\"summary\", files=args.summary).set_index(\"readIDs\") \\ .merge(right=get_input(source=\"bam\", files=args.bam).set_index(\"readIDs\"), how=\"left\", left_index=True,", "\\ .merge(right=get_input(source=\"bam\", files=args.bam).set_index(\"readIDs\"), how=\"left\", left_index=True, right_index=True) plot_retrotect(df=merged_df, path=path.join(args.outdir, args.prefix), figformat=args.format, title=args.title, hours=args.hours) merged_df.dropna(axis=\"index\",", "\\ .reset_index(drop=True) \\ .reset_index() ax = sns.regplot( x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"] + 1), x_ci=None, fit_reg=False,", "to be used by the script\", default=4, type=int) general.add_argument(\"-o\", \"--outdir\", help=\"Specify directory in", "dfs[\"index\"].max()] ax.set( xlabel='Run time (minutes)', yticks=np.log10(yticks), yticklabels=yticks, ylabel='Cumulative yield in log transformed number", "options') general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show the help and exit\") general.add_argument(\"-v\", \"--version\", help=\"Print version", "from nanoplotter import check_valid_time_and_sort, Plot from os import path import seaborn as sns", "help=\"Data is a summary file generated by albacore.\", nargs='+', metavar=\"files\", required=True) target.add_argument(\"--bam\", help=\"Data", "type=str, default=None) visual.add_argument(\"--hours\", help=\"How many hours to plot in the graph\", type=int, default=8)", "the script\", default=4, type=int) general.add_argument(\"-o\", \"--outdir\", help=\"Specify directory in which output has to", "utils from .version import __version__ from nanoplotter import check_valid_time_and_sort, Plot from os import", "summary file.\") target.add_argument(\"--summary\", help=\"Data is a summary file generated by albacore.\", nargs='+', metavar=\"files\",", "type=int, default=8) target = parser.add_argument_group( title=\"Input data sources, requires a bam and a", "__version__ from nanoplotter import check_valid_time_and_sort, Plot from os import path import seaborn as", "import check_valid_time_and_sort, Plot from os import path import seaborn as sns import matplotlib.pyplot", "\"--help\", action=\"help\", help=\"show the help and exit\") general.add_argument(\"-v\", \"--version\", help=\"Print version and exit.\",", "customizing the plots created') visual.add_argument(\"-f\", \"--format\", help=\"Specify the output format of the plots.\",", "'pdf', 'pgf', 'png', 'ps', 'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff']) visual.add_argument(\"--title\", help=\"Add a", "choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf', 'png', 'ps', 'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff'])", "= \"\"\"\"\"\" parser = ArgumentParser( description=\"Get detection curve of nanopore 
experiment.\", epilog=epilog, formatter_class=utils.custom_formatter,", "yticks = [10**i for i in range(10) if not 10**i > 10 *", "\"Retrotect_details.txt.gz\", sep=\"\\t\", columns=[\"start_time\"], compression='gzip') def get_args(): epilog = \"\"\"\"\"\" parser = ArgumentParser( description=\"Get", "\"\"\"\"\"\" parser = ArgumentParser( description=\"Get detection curve of nanopore experiment.\", epilog=epilog, formatter_class=utils.custom_formatter, add_help=False)", "def get_args(): epilog = \"\"\"\"\"\" parser = ArgumentParser( description=\"Get detection curve of nanopore", "curve of nanopore experiment.\", epilog=epilog, formatter_class=utils.custom_formatter, add_help=False) general = parser.add_argument_group( title='General options') general.add_argument(\"-h\",", "figformat=args.format, title=args.title, hours=args.hours) merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir, args.prefix) + \"Retrotect_details.txt.gz\", sep=\"\\t\", columns=[\"start_time\"], compression='gzip') def", "i in range(10) if not 10**i > 10 * dfs[\"index\"].max()] ax.set( xlabel='Run time", "type=str) general.add_argument(\"--verbose\", help=\"Write log messages also to terminal.\", action=\"store_true\") visual = parser.add_argument_group( title='Options", "= dfs[\"start_time\"].astype('timedelta64[m]') # ?! dtype float64 cum_yield_reads = Plot( path=path + \"CumulativeYieldPlot_NumberOfReads.\" +", "cum_yield_reads.fig = fig fig.savefig(cum_yield_reads.path, format=figformat, dpi=100, bbox_inches=\"tight\") plt.close(\"all\") if __name__ == '__main__': main()", "the allowed number of threads to be used by the script\", default=4, type=int)", "visual.add_argument(\"-f\", \"--format\", help=\"Specify the output format of the plots.\", default=\"png\", type=str, choices=['eps', 'jpeg',", "how=\"left\", left_index=True, right_index=True) plot_retrotect(df=merged_df, path=path.join(args.outdir, args.prefix), figformat=args.format, title=args.title, hours=args.hours) merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir, args.prefix)", "y=np.log10(aligned_df[\"index\"] + 1), x_ci=None, fit_reg=False, color=\"red\", scatter_kws={\"s\": 1}, ax=ax) yticks = [10**i for", "plot_retrotect(df=merged_df, path=path.join(args.outdir, args.prefix), figformat=args.format, title=args.title, hours=args.hours) merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir, args.prefix) + \"Retrotect_details.txt.gz\", sep=\"\\t\",", "general.add_argument(\"-p\", \"--prefix\", help=\"Specify an optional prefix to be used for the output files.\",", "help=\"Data as a sorted bam file.\", nargs='+', metavar=\"files\", required=True) return parser.parse_args() def plot_retrotect(df,", "ax=ax) yticks = [10**i for i in range(10) if not 10**i > 10", "/ 24, warning=False) dfs[\"start_time\"] = dfs[\"start_time\"].astype('timedelta64[m]') # ?! 
dtype float64 cum_yield_reads = Plot(", "def plot_retrotect(df, path, figformat=\"png\", title=None, hours=8): dfs = check_valid_time_and_sort( df=df, timescol=\"start_time\", days=hours /", "from nanoplot import utils from .version import __version__ from nanoplotter import check_valid_time_and_sort, Plot", "get_args() merged_df = get_input(source=\"summary\", files=args.summary).set_index(\"readIDs\") \\ .merge(right=get_input(source=\"bam\", files=args.bam).set_index(\"readIDs\"), how=\"left\", left_index=True, right_index=True) plot_retrotect(df=merged_df, path=path.join(args.outdir,", "of the plots.\", default=\"png\", type=str, choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf', 'png', 'ps', 'raw',", "type=str, choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf', 'png', 'ps', 'raw', 'rgba', 'svg', 'svgz', 'tif',", "return parser.parse_args() def plot_retrotect(df, path, figformat=\"png\", title=None, hours=8): dfs = check_valid_time_and_sort( df=df, timescol=\"start_time\",", "parser.parse_args() def plot_retrotect(df, path, figformat=\"png\", title=None, hours=8): dfs = check_valid_time_and_sort( df=df, timescol=\"start_time\", days=hours", "title to all plots, requires quoting if using spaces\", type=str, default=None) visual.add_argument(\"--hours\", help=\"How", "nanoplot import utils from .version import __version__ from nanoplotter import check_valid_time_and_sort, Plot from", "as plt import numpy as np def main(): args = get_args() merged_df =", "allowed number of threads to be used by the script\", default=4, type=int) general.add_argument(\"-o\",", "by the script\", default=4, type=int) general.add_argument(\"-o\", \"--outdir\", help=\"Specify directory in which output has", "dfs[\"start_time\"] = dfs[\"start_time\"].astype('timedelta64[m]') # ?! dtype float64 cum_yield_reads = Plot( path=path + \"CumulativeYieldPlot_NumberOfReads.\"", "add_help=False) general = parser.add_argument_group( title='General options') general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show the help and", "help=\"Specify an optional prefix to be used for the output files.\", default=\"\", type=str)", "parser.add_argument_group( title=\"Input data sources, requires a bam and a summary file.\") target.add_argument(\"--summary\", help=\"Data", "visual.add_argument(\"--hours\", help=\"How many hours to plot in the graph\", type=int, default=8) target =", "title=\"Cumulative yield\") ax = sns.regplot( x=dfs['start_time'], y=np.log10(dfs['index'] + 1), x_ci=None, fit_reg=False, color=\"blue\", scatter_kws={\"s\":", "time (minutes)', yticks=np.log10(yticks), yticklabels=yticks, ylabel='Cumulative yield in log transformed number of reads', title=title", "output has to be created.\", default=\".\") general.add_argument(\"-p\", \"--prefix\", help=\"Specify an optional prefix to", "an optional prefix to be used for the output files.\", default=\"\", type=str) general.add_argument(\"--verbose\",", ".reset_index(drop=True) \\ .reset_index() ax = sns.regplot( x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"] + 1), x_ci=None, fit_reg=False, color=\"red\",", "be used by the script\", default=4, type=int) general.add_argument(\"-o\", \"--outdir\", help=\"Specify directory in which", "import seaborn as sns import matplotlib.pyplot as plt import numpy as np def", "import __version__ from nanoplotter import check_valid_time_and_sort, Plot from os import path import seaborn", "= get_input(source=\"summary\", files=args.summary).set_index(\"readIDs\") \\ .merge(right=get_input(source=\"bam\", 
files=args.bam).set_index(\"readIDs\"), how=\"left\", left_index=True, right_index=True) plot_retrotect(df=merged_df, path=path.join(args.outdir, args.prefix), figformat=args.format,", "\"--prefix\", help=\"Specify an optional prefix to be used for the output files.\", default=\"\",", "1), x_ci=None, fit_reg=False, color=\"red\", scatter_kws={\"s\": 1}, ax=ax) yticks = [10**i for i in", "plt import numpy as np def main(): args = get_args() merged_df = get_input(source=\"summary\",", "[10**i for i in range(10) if not 10**i > 10 * dfs[\"index\"].max()] ax.set(", "path=path.join(args.outdir, args.prefix), figformat=args.format, title=args.title, hours=args.hours) merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir, args.prefix) + \"Retrotect_details.txt.gz\", sep=\"\\t\", columns=[\"start_time\"],", "scatter_kws={\"s\": 1}, ax=ax) yticks = [10**i for i in range(10) if not 10**i", "color=\"red\", scatter_kws={\"s\": 1}, ax=ax) yticks = [10**i for i in range(10) if not", "files=args.bam).set_index(\"readIDs\"), how=\"left\", left_index=True, right_index=True) plot_retrotect(df=merged_df, path=path.join(args.outdir, args.prefix), figformat=args.format, title=args.title, hours=args.hours) merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir,", "has to be created.\", default=\".\") general.add_argument(\"-p\", \"--prefix\", help=\"Specify an optional prefix to be", "nanoget import get_input from argparse import ArgumentParser from nanoplot import utils from .version", "sns import matplotlib.pyplot as plt import numpy as np def main(): args =", "general.add_argument(\"-v\", \"--version\", help=\"Print version and exit.\", action=\"version\", version='NanoComp {}'.format(__version__)) general.add_argument(\"-t\", \"--threads\", help=\"Set the", "\\ .reset_index() ax = sns.regplot( x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"] + 1), x_ci=None, fit_reg=False, color=\"red\", scatter_kws={\"s\":", "of reads', title=title or cum_yield_reads.title) fig = ax.get_figure() cum_yield_reads.fig = fig fig.savefig(cum_yield_reads.path, format=figformat,", "output format of the plots.\", default=\"png\", type=str, choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf', 'png',", "'tiff']) visual.add_argument(\"--title\", help=\"Add a title to all plots, requires quoting if using spaces\",", "= sns.regplot( x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"] + 1), x_ci=None, fit_reg=False, color=\"red\", scatter_kws={\"s\": 1}, ax=ax) yticks", "and a summary file.\") target.add_argument(\"--summary\", help=\"Data is a summary file generated by albacore.\",", "action=\"help\", help=\"show the help and exit\") general.add_argument(\"-v\", \"--version\", help=\"Print version and exit.\", action=\"version\",", "in which output has to be created.\", default=\".\") general.add_argument(\"-p\", \"--prefix\", help=\"Specify an optional", "help=\"Add a title to all plots, requires quoting if using spaces\", type=str, default=None)", "'svgz', 'tif', 'tiff']) visual.add_argument(\"--title\", help=\"Add a title to all plots, requires quoting if", "the help and exit\") general.add_argument(\"-v\", \"--version\", help=\"Print version and exit.\", action=\"version\", version='NanoComp {}'.format(__version__))", "numpy as np def main(): args = get_args() merged_df = get_input(source=\"summary\", files=args.summary).set_index(\"readIDs\") \\", "to be created.\", default=\".\") 
general.add_argument(\"-p\", \"--prefix\", help=\"Specify an optional prefix to be used", "to terminal.\", action=\"store_true\") visual = parser.add_argument_group( title='Options for customizing the plots created') visual.add_argument(\"-f\",", "'jpeg', 'jpg', 'pdf', 'pgf', 'png', 'ps', 'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff']) visual.add_argument(\"--title\",", "to plot in the graph\", type=int, default=8) target = parser.add_argument_group( title=\"Input data sources,", "?! dtype float64 cum_yield_reads = Plot( path=path + \"CumulativeYieldPlot_NumberOfReads.\" + figformat, title=\"Cumulative yield\")", "script\", default=4, type=int) general.add_argument(\"-o\", \"--outdir\", help=\"Specify directory in which output has to be", "10**i > 10 * dfs[\"index\"].max()] ax.set( xlabel='Run time (minutes)', yticks=np.log10(yticks), yticklabels=yticks, ylabel='Cumulative yield", "{}'.format(__version__)) general.add_argument(\"-t\", \"--threads\", help=\"Set the allowed number of threads to be used by", "be created.\", default=\".\") general.add_argument(\"-p\", \"--prefix\", help=\"Specify an optional prefix to be used for", "for i in range(10) if not 10**i > 10 * dfs[\"index\"].max()] ax.set( xlabel='Run", "epilog=epilog, formatter_class=utils.custom_formatter, add_help=False) general = parser.add_argument_group( title='General options') general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show the", "general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show the help and exit\") general.add_argument(\"-v\", \"--version\", help=\"Print version and", "title=args.title, hours=args.hours) merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir, args.prefix) + \"Retrotect_details.txt.gz\", sep=\"\\t\", columns=[\"start_time\"], compression='gzip') def get_args():", "(minutes)', yticks=np.log10(yticks), yticklabels=yticks, ylabel='Cumulative yield in log transformed number of reads', title=title or", "fig = ax.get_figure() cum_yield_reads.fig = fig fig.savefig(cum_yield_reads.path, format=figformat, dpi=100, bbox_inches=\"tight\") plt.close(\"all\") if __name__", "merged_df = get_input(source=\"summary\", files=args.summary).set_index(\"readIDs\") \\ .merge(right=get_input(source=\"bam\", files=args.bam).set_index(\"readIDs\"), how=\"left\", left_index=True, right_index=True) plot_retrotect(df=merged_df, path=path.join(args.outdir, args.prefix),", "right_index=True) plot_retrotect(df=merged_df, path=path.join(args.outdir, args.prefix), figformat=args.format, title=args.title, hours=args.hours) merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir, args.prefix) + \"Retrotect_details.txt.gz\",", "target.add_argument(\"--summary\", help=\"Data is a summary file generated by albacore.\", nargs='+', metavar=\"files\", required=True) target.add_argument(\"--bam\",", "fit_reg=False, color=\"blue\", scatter_kws={\"s\": 1}) aligned_df = dfs.drop('index', axis=1) \\ .dropna(axis=\"index\", how=\"any\") \\ .reset_index(drop=True)", "color=\"blue\", scatter_kws={\"s\": 1}) aligned_df = dfs.drop('index', axis=1) \\ .dropna(axis=\"index\", how=\"any\") \\ .reset_index(drop=True) \\", "file generated by albacore.\", nargs='+', metavar=\"files\", required=True) target.add_argument(\"--bam\", help=\"Data as a sorted bam", "target.add_argument(\"--bam\", help=\"Data as a sorted bam file.\", nargs='+', metavar=\"files\", required=True) return parser.parse_args() 
def", "1), x_ci=None, fit_reg=False, color=\"blue\", scatter_kws={\"s\": 1}) aligned_df = dfs.drop('index', axis=1) \\ .dropna(axis=\"index\", how=\"any\")", "title=\"Input data sources, requires a bam and a summary file.\") target.add_argument(\"--summary\", help=\"Data is", "a sorted bam file.\", nargs='+', metavar=\"files\", required=True) return parser.parse_args() def plot_retrotect(df, path, figformat=\"png\",", "as sns import matplotlib.pyplot as plt import numpy as np def main(): args", "import numpy as np def main(): args = get_args() merged_df = get_input(source=\"summary\", files=args.summary).set_index(\"readIDs\")", "for customizing the plots created') visual.add_argument(\"-f\", \"--format\", help=\"Specify the output format of the", "transformed number of reads', title=title or cum_yield_reads.title) fig = ax.get_figure() cum_yield_reads.fig = fig", "yield in log transformed number of reads', title=title or cum_yield_reads.title) fig = ax.get_figure()", "bam file.\", nargs='+', metavar=\"files\", required=True) return parser.parse_args() def plot_retrotect(df, path, figformat=\"png\", title=None, hours=8):", "a summary file generated by albacore.\", nargs='+', metavar=\"files\", required=True) target.add_argument(\"--bam\", help=\"Data as a", "type=int) general.add_argument(\"-o\", \"--outdir\", help=\"Specify directory in which output has to be created.\", default=\".\")", "title='Options for customizing the plots created') visual.add_argument(\"-f\", \"--format\", help=\"Specify the output format of", "in the graph\", type=int, default=8) target = parser.add_argument_group( title=\"Input data sources, requires a", "requires quoting if using spaces\", type=str, default=None) visual.add_argument(\"--hours\", help=\"How many hours to plot", "the graph\", type=int, default=8) target = parser.add_argument_group( title=\"Input data sources, requires a bam", "experiment.\", epilog=epilog, formatter_class=utils.custom_formatter, add_help=False) general = parser.add_argument_group( title='General options') general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show", "created.\", default=\".\") general.add_argument(\"-p\", \"--prefix\", help=\"Specify an optional prefix to be used for the", "required=True) target.add_argument(\"--bam\", help=\"Data as a sorted bam file.\", nargs='+', metavar=\"files\", required=True) return parser.parse_args()", "float64 cum_yield_reads = Plot( path=path + \"CumulativeYieldPlot_NumberOfReads.\" + figformat, title=\"Cumulative yield\") ax =", "days=hours / 24, warning=False) dfs[\"start_time\"] = dfs[\"start_time\"].astype('timedelta64[m]') # ?! 
dtype float64 cum_yield_reads =", "the output files.\", default=\"\", type=str) general.add_argument(\"--verbose\", help=\"Write log messages also to terminal.\", action=\"store_true\")", "hours=8): dfs = check_valid_time_and_sort( df=df, timescol=\"start_time\", days=hours / 24, warning=False) dfs[\"start_time\"] = dfs[\"start_time\"].astype('timedelta64[m]')", "= ax.get_figure() cum_yield_reads.fig = fig fig.savefig(cum_yield_reads.path, format=figformat, dpi=100, bbox_inches=\"tight\") plt.close(\"all\") if __name__ ==", "args.prefix) + \"Retrotect_details.txt.gz\", sep=\"\\t\", columns=[\"start_time\"], compression='gzip') def get_args(): epilog = \"\"\"\"\"\" parser =", "number of reads', title=title or cum_yield_reads.title) fig = ax.get_figure() cum_yield_reads.fig = fig fig.savefig(cum_yield_reads.path,", "x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"] + 1), x_ci=None, fit_reg=False, color=\"red\", scatter_kws={\"s\": 1}, ax=ax) yticks = [10**i", ".merge(right=get_input(source=\"bam\", files=args.bam).set_index(\"readIDs\"), how=\"left\", left_index=True, right_index=True) plot_retrotect(df=merged_df, path=path.join(args.outdir, args.prefix), figformat=args.format, title=args.title, hours=args.hours) merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv(", "plot_retrotect(df, path, figformat=\"png\", title=None, hours=8): dfs = check_valid_time_and_sort( df=df, timescol=\"start_time\", days=hours / 24,", "help and exit\") general.add_argument(\"-v\", \"--version\", help=\"Print version and exit.\", action=\"version\", version='NanoComp {}'.format(__version__)) general.add_argument(\"-t\",", "ArgumentParser from nanoplot import utils from .version import __version__ from nanoplotter import check_valid_time_and_sort,", "ylabel='Cumulative yield in log transformed number of reads', title=title or cum_yield_reads.title) fig =", "+ figformat, title=\"Cumulative yield\") ax = sns.regplot( x=dfs['start_time'], y=np.log10(dfs['index'] + 1), x_ci=None, fit_reg=False,", "path import seaborn as sns import matplotlib.pyplot as plt import numpy as np", "graph\", type=int, default=8) target = parser.add_argument_group( title=\"Input data sources, requires a bam and", "timescol=\"start_time\", days=hours / 24, warning=False) dfs[\"start_time\"] = dfs[\"start_time\"].astype('timedelta64[m]') # ?! 
dtype float64 cum_yield_reads", "figformat, title=\"Cumulative yield\") ax = sns.regplot( x=dfs['start_time'], y=np.log10(dfs['index'] + 1), x_ci=None, fit_reg=False, color=\"blue\",", "yield\") ax = sns.regplot( x=dfs['start_time'], y=np.log10(dfs['index'] + 1), x_ci=None, fit_reg=False, color=\"blue\", scatter_kws={\"s\": 1})", "'tif', 'tiff']) visual.add_argument(\"--title\", help=\"Add a title to all plots, requires quoting if using", "xlabel='Run time (minutes)', yticks=np.log10(yticks), yticklabels=yticks, ylabel='Cumulative yield in log transformed number of reads',", "\"--version\", help=\"Print version and exit.\", action=\"version\", version='NanoComp {}'.format(__version__)) general.add_argument(\"-t\", \"--threads\", help=\"Set the allowed", "also to terminal.\", action=\"store_true\") visual = parser.add_argument_group( title='Options for customizing the plots created')", "how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir, args.prefix) + \"Retrotect_details.txt.gz\", sep=\"\\t\", columns=[\"start_time\"], compression='gzip') def get_args(): epilog = \"\"\"\"\"\"", "default=4, type=int) general.add_argument(\"-o\", \"--outdir\", help=\"Specify directory in which output has to be created.\",", "'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff']) visual.add_argument(\"--title\", help=\"Add a title to all plots,", "help=\"show the help and exit\") general.add_argument(\"-v\", \"--version\", help=\"Print version and exit.\", action=\"version\", version='NanoComp", "metavar=\"files\", required=True) return parser.parse_args() def plot_retrotect(df, path, figformat=\"png\", title=None, hours=8): dfs = check_valid_time_and_sort(", "'ps', 'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff']) visual.add_argument(\"--title\", help=\"Add a title to all", "output files.\", default=\"\", type=str) general.add_argument(\"--verbose\", help=\"Write log messages also to terminal.\", action=\"store_true\") visual", "left_index=True, right_index=True) plot_retrotect(df=merged_df, path=path.join(args.outdir, args.prefix), figformat=args.format, title=args.title, hours=args.hours) merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir, args.prefix) +", "24, warning=False) dfs[\"start_time\"] = dfs[\"start_time\"].astype('timedelta64[m]') # ?! 
dtype float64 cum_yield_reads = Plot( path=path", "os import path import seaborn as sns import matplotlib.pyplot as plt import numpy", "import utils from .version import __version__ from nanoplotter import check_valid_time_and_sort, Plot from os", "and exit\") general.add_argument(\"-v\", \"--version\", help=\"Print version and exit.\", action=\"version\", version='NanoComp {}'.format(__version__)) general.add_argument(\"-t\", \"--threads\",", "nanopore experiment.\", epilog=epilog, formatter_class=utils.custom_formatter, add_help=False) general = parser.add_argument_group( title='General options') general.add_argument(\"-h\", \"--help\", action=\"help\",", "or cum_yield_reads.title) fig = ax.get_figure() cum_yield_reads.fig = fig fig.savefig(cum_yield_reads.path, format=figformat, dpi=100, bbox_inches=\"tight\") plt.close(\"all\")", "merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir, args.prefix) + \"Retrotect_details.txt.gz\", sep=\"\\t\", columns=[\"start_time\"], compression='gzip') def get_args(): epilog =", "cum_yield_reads.title) fig = ax.get_figure() cum_yield_reads.fig = fig fig.savefig(cum_yield_reads.path, format=figformat, dpi=100, bbox_inches=\"tight\") plt.close(\"all\") if", "log messages also to terminal.\", action=\"store_true\") visual = parser.add_argument_group( title='Options for customizing the", "threads to be used by the script\", default=4, type=int) general.add_argument(\"-o\", \"--outdir\", help=\"Specify directory", "nargs='+', metavar=\"files\", required=True) return parser.parse_args() def plot_retrotect(df, path, figformat=\"png\", title=None, hours=8): dfs =", "messages also to terminal.\", action=\"store_true\") visual = parser.add_argument_group( title='Options for customizing the plots", "help=\"Write log messages also to terminal.\", action=\"store_true\") visual = parser.add_argument_group( title='Options for customizing", "fit_reg=False, color=\"red\", scatter_kws={\"s\": 1}, ax=ax) yticks = [10**i for i in range(10) if", "general.add_argument(\"-t\", \"--threads\", help=\"Set the allowed number of threads to be used by the", "data sources, requires a bam and a summary file.\") target.add_argument(\"--summary\", help=\"Data is a", "detection curve of nanopore experiment.\", epilog=epilog, formatter_class=utils.custom_formatter, add_help=False) general = parser.add_argument_group( title='General options')", "get_args(): epilog = \"\"\"\"\"\" parser = ArgumentParser( description=\"Get detection curve of nanopore experiment.\",", "metavar=\"files\", required=True) target.add_argument(\"--bam\", help=\"Data as a sorted bam file.\", nargs='+', metavar=\"files\", required=True) return", "be used for the output files.\", default=\"\", type=str) general.add_argument(\"--verbose\", help=\"Write log messages also", "terminal.\", action=\"store_true\") visual = parser.add_argument_group( title='Options for customizing the plots created') visual.add_argument(\"-f\", \"--format\",", "sns.regplot( x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"] + 1), x_ci=None, fit_reg=False, color=\"red\", scatter_kws={\"s\": 1}, ax=ax) yticks =", "get_input from argparse import ArgumentParser from nanoplot import utils from .version import __version__", "title='General options') general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show the help and exit\") general.add_argument(\"-v\", \"--version\", help=\"Print", "created') visual.add_argument(\"-f\", \"--format\", help=\"Specify the 
output format of the plots.\", default=\"png\", type=str, choices=['eps',", "matplotlib.pyplot as plt import numpy as np def main(): args = get_args() merged_df", ".version import __version__ from nanoplotter import check_valid_time_and_sort, Plot from os import path import", "file.\") target.add_argument(\"--summary\", help=\"Data is a summary file generated by albacore.\", nargs='+', metavar=\"files\", required=True)", "action=\"store_true\") visual = parser.add_argument_group( title='Options for customizing the plots created') visual.add_argument(\"-f\", \"--format\", help=\"Specify", "in range(10) if not 10**i > 10 * dfs[\"index\"].max()] ax.set( xlabel='Run time (minutes)',", "\\ .dropna(axis=\"index\", how=\"any\") \\ .reset_index(drop=True) \\ .reset_index() ax = sns.regplot( x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"] +", "the plots.\", default=\"png\", type=str, choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf', 'png', 'ps', 'raw', 'rgba',", "sns.regplot( x=dfs['start_time'], y=np.log10(dfs['index'] + 1), x_ci=None, fit_reg=False, color=\"blue\", scatter_kws={\"s\": 1}) aligned_df = dfs.drop('index',", "general.add_argument(\"--verbose\", help=\"Write log messages also to terminal.\", action=\"store_true\") visual = parser.add_argument_group( title='Options for", "argparse import ArgumentParser from nanoplot import utils from .version import __version__ from nanoplotter", "parser.add_argument_group( title='Options for customizing the plots created') visual.add_argument(\"-f\", \"--format\", help=\"Specify the output format", "= sns.regplot( x=dfs['start_time'], y=np.log10(dfs['index'] + 1), x_ci=None, fit_reg=False, color=\"blue\", scatter_kws={\"s\": 1}) aligned_df =", "\"--outdir\", help=\"Specify directory in which output has to be created.\", default=\".\") general.add_argument(\"-p\", \"--prefix\",", "from nanoget import get_input from argparse import ArgumentParser from nanoplot import utils from", "version='NanoComp {}'.format(__version__)) general.add_argument(\"-t\", \"--threads\", help=\"Set the allowed number of threads to be used", "= Plot( path=path + \"CumulativeYieldPlot_NumberOfReads.\" + figformat, title=\"Cumulative yield\") ax = sns.regplot( x=dfs['start_time'],", "format of the plots.\", default=\"png\", type=str, choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf', 'png', 'ps',", "used for the output files.\", default=\"\", type=str) general.add_argument(\"--verbose\", help=\"Write log messages also to", "spaces\", type=str, default=None) visual.add_argument(\"--hours\", help=\"How many hours to plot in the graph\", type=int,", "# ?! 
dtype float64 cum_yield_reads = Plot( path=path + \"CumulativeYieldPlot_NumberOfReads.\" + figformat, title=\"Cumulative", "= [10**i for i in range(10) if not 10**i > 10 * dfs[\"index\"].max()]", "a title to all plots, requires quoting if using spaces\", type=str, default=None) visual.add_argument(\"--hours\",", "+ \"Retrotect_details.txt.gz\", sep=\"\\t\", columns=[\"start_time\"], compression='gzip') def get_args(): epilog = \"\"\"\"\"\" parser = ArgumentParser(", "by albacore.\", nargs='+', metavar=\"files\", required=True) target.add_argument(\"--bam\", help=\"Data as a sorted bam file.\", nargs='+',", "def main(): args = get_args() merged_df = get_input(source=\"summary\", files=args.summary).set_index(\"readIDs\") \\ .merge(right=get_input(source=\"bam\", files=args.bam).set_index(\"readIDs\"), how=\"left\",", "sources, requires a bam and a summary file.\") target.add_argument(\"--summary\", help=\"Data is a summary", "file.\", nargs='+', metavar=\"files\", required=True) return parser.parse_args() def plot_retrotect(df, path, figformat=\"png\", title=None, hours=8): dfs", "path_or_buf=path.join(args.outdir, args.prefix) + \"Retrotect_details.txt.gz\", sep=\"\\t\", columns=[\"start_time\"], compression='gzip') def get_args(): epilog = \"\"\"\"\"\" parser", "x=dfs['start_time'], y=np.log10(dfs['index'] + 1), x_ci=None, fit_reg=False, color=\"blue\", scatter_kws={\"s\": 1}) aligned_df = dfs.drop('index', axis=1)", "prefix to be used for the output files.\", default=\"\", type=str) general.add_argument(\"--verbose\", help=\"Write log", "= parser.add_argument_group( title='Options for customizing the plots created') visual.add_argument(\"-f\", \"--format\", help=\"Specify the output", "y=np.log10(dfs['index'] + 1), x_ci=None, fit_reg=False, color=\"blue\", scatter_kws={\"s\": 1}) aligned_df = dfs.drop('index', axis=1) \\", "how=\"any\") \\ .reset_index(drop=True) \\ .reset_index() ax = sns.regplot( x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"] + 1), x_ci=None,", "* dfs[\"index\"].max()] ax.set( xlabel='Run time (minutes)', yticks=np.log10(yticks), yticklabels=yticks, ylabel='Cumulative yield in log transformed", "Plot( path=path + \"CumulativeYieldPlot_NumberOfReads.\" + figformat, title=\"Cumulative yield\") ax = sns.regplot( x=dfs['start_time'], y=np.log10(dfs['index']", "yticks=np.log10(yticks), yticklabels=yticks, ylabel='Cumulative yield in log transformed number of reads', title=title or cum_yield_reads.title)", "if not 10**i > 10 * dfs[\"index\"].max()] ax.set( xlabel='Run time (minutes)', yticks=np.log10(yticks), yticklabels=yticks,", "from os import path import seaborn as sns import matplotlib.pyplot as plt import", "visual = parser.add_argument_group( title='Options for customizing the plots created') visual.add_argument(\"-f\", \"--format\", help=\"Specify the", "yticklabels=yticks, ylabel='Cumulative yield in log transformed number of reads', title=title or cum_yield_reads.title) fig", "from argparse import ArgumentParser from nanoplot import utils from .version import __version__ from", "epilog = \"\"\"\"\"\" parser = ArgumentParser( description=\"Get detection curve of nanopore experiment.\", epilog=epilog,", ".dropna(axis=\"index\", how=\"any\") \\ .reset_index(drop=True) \\ .reset_index() ax = sns.regplot( x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"] + 1),", "hours=args.hours) merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir, args.prefix) + \"Retrotect_details.txt.gz\", 
sep=\"\\t\", columns=[\"start_time\"], compression='gzip') def get_args(): epilog", "10 * dfs[\"index\"].max()] ax.set( xlabel='Run time (minutes)', yticks=np.log10(yticks), yticklabels=yticks, ylabel='Cumulative yield in log", "the plots created') visual.add_argument(\"-f\", \"--format\", help=\"Specify the output format of the plots.\", default=\"png\",", "a bam and a summary file.\") target.add_argument(\"--summary\", help=\"Data is a summary file generated", "optional prefix to be used for the output files.\", default=\"\", type=str) general.add_argument(\"--verbose\", help=\"Write", "x_ci=None, fit_reg=False, color=\"blue\", scatter_kws={\"s\": 1}) aligned_df = dfs.drop('index', axis=1) \\ .dropna(axis=\"index\", how=\"any\") \\", "ax = sns.regplot( x=aligned_df['start_time'], y=np.log10(aligned_df[\"index\"] + 1), x_ci=None, fit_reg=False, color=\"red\", scatter_kws={\"s\": 1}, ax=ax)", "import path import seaborn as sns import matplotlib.pyplot as plt import numpy as", "1}) aligned_df = dfs.drop('index', axis=1) \\ .dropna(axis=\"index\", how=\"any\") \\ .reset_index(drop=True) \\ .reset_index() ax", "'rgba', 'svg', 'svgz', 'tif', 'tiff']) visual.add_argument(\"--title\", help=\"Add a title to all plots, requires", "columns=[\"start_time\"], compression='gzip') def get_args(): epilog = \"\"\"\"\"\" parser = ArgumentParser( description=\"Get detection curve", "parser.add_argument_group( title='General options') general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show the help and exit\") general.add_argument(\"-v\", \"--version\",", "as np def main(): args = get_args() merged_df = get_input(source=\"summary\", files=args.summary).set_index(\"readIDs\") \\ .merge(right=get_input(source=\"bam\",", "exit.\", action=\"version\", version='NanoComp {}'.format(__version__)) general.add_argument(\"-t\", \"--threads\", help=\"Set the allowed number of threads to", "compression='gzip') def get_args(): epilog = \"\"\"\"\"\" parser = ArgumentParser( description=\"Get detection curve of", "ax = sns.regplot( x=dfs['start_time'], y=np.log10(dfs['index'] + 1), x_ci=None, fit_reg=False, color=\"blue\", scatter_kws={\"s\": 1}) aligned_df", "all plots, requires quoting if using spaces\", type=str, default=None) visual.add_argument(\"--hours\", help=\"How many hours", "log transformed number of reads', title=title or cum_yield_reads.title) fig = ax.get_figure() cum_yield_reads.fig =", "target = parser.add_argument_group( title=\"Input data sources, requires a bam and a summary file.\")", "\"--format\", help=\"Specify the output format of the plots.\", default=\"png\", type=str, choices=['eps', 'jpeg', 'jpg',", "parser = ArgumentParser( description=\"Get detection curve of nanopore experiment.\", epilog=epilog, formatter_class=utils.custom_formatter, add_help=False) general", "check_valid_time_and_sort, Plot from os import path import seaborn as sns import matplotlib.pyplot as", "files.\", default=\"\", type=str) general.add_argument(\"--verbose\", help=\"Write log messages also to terminal.\", action=\"store_true\") visual =", "requires a bam and a summary file.\") target.add_argument(\"--summary\", help=\"Data is a summary file", "'jpg', 'pdf', 'pgf', 'png', 'ps', 'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff']) visual.add_argument(\"--title\", help=\"Add", "dfs[\"start_time\"].astype('timedelta64[m]') # ?! 
dtype float64 cum_yield_reads = Plot( path=path + \"CumulativeYieldPlot_NumberOfReads.\" + figformat,", "to be used for the output files.\", default=\"\", type=str) general.add_argument(\"--verbose\", help=\"Write log messages", "as a sorted bam file.\", nargs='+', metavar=\"files\", required=True) return parser.parse_args() def plot_retrotect(df, path,", "exit\") general.add_argument(\"-v\", \"--version\", help=\"Print version and exit.\", action=\"version\", version='NanoComp {}'.format(__version__)) general.add_argument(\"-t\", \"--threads\", help=\"Set", "used by the script\", default=4, type=int) general.add_argument(\"-o\", \"--outdir\", help=\"Specify directory in which output", "a summary file.\") target.add_argument(\"--summary\", help=\"Data is a summary file generated by albacore.\", nargs='+',", "plots created') visual.add_argument(\"-f\", \"--format\", help=\"Specify the output format of the plots.\", default=\"png\", type=str,", "if using spaces\", type=str, default=None) visual.add_argument(\"--hours\", help=\"How many hours to plot in the", "import matplotlib.pyplot as plt import numpy as np def main(): args = get_args()", "bam and a summary file.\") target.add_argument(\"--summary\", help=\"Data is a summary file generated by", "hours to plot in the graph\", type=int, default=8) target = parser.add_argument_group( title=\"Input data", "not 10**i > 10 * dfs[\"index\"].max()] ax.set( xlabel='Run time (minutes)', yticks=np.log10(yticks), yticklabels=yticks, ylabel='Cumulative", "import get_input from argparse import ArgumentParser from nanoplot import utils from .version import", "aligned_df = dfs.drop('index', axis=1) \\ .dropna(axis=\"index\", how=\"any\") \\ .reset_index(drop=True) \\ .reset_index() ax =", "help=\"Print version and exit.\", action=\"version\", version='NanoComp {}'.format(__version__)) general.add_argument(\"-t\", \"--threads\", help=\"Set the allowed number", "general = parser.add_argument_group( title='General options') general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show the help and exit\")", "summary file generated by albacore.\", nargs='+', metavar=\"files\", required=True) target.add_argument(\"--bam\", help=\"Data as a sorted", "files=args.summary).set_index(\"readIDs\") \\ .merge(right=get_input(source=\"bam\", files=args.bam).set_index(\"readIDs\"), how=\"left\", left_index=True, right_index=True) plot_retrotect(df=merged_df, path=path.join(args.outdir, args.prefix), figformat=args.format, title=args.title, hours=args.hours)", "formatter_class=utils.custom_formatter, add_help=False) general = parser.add_argument_group( title='General options') general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show the help", "general.add_argument(\"-o\", \"--outdir\", help=\"Specify directory in which output has to be created.\", default=\".\") general.add_argument(\"-p\",", "= dfs.drop('index', axis=1) \\ .dropna(axis=\"index\", how=\"any\") \\ .reset_index(drop=True) \\ .reset_index() ax = sns.regplot(", "\"CumulativeYieldPlot_NumberOfReads.\" + figformat, title=\"Cumulative yield\") ax = sns.regplot( x=dfs['start_time'], y=np.log10(dfs['index'] + 1), x_ci=None,", "ArgumentParser( description=\"Get detection curve of nanopore experiment.\", epilog=epilog, formatter_class=utils.custom_formatter, add_help=False) general = parser.add_argument_group(", "default=\"\", type=str) general.add_argument(\"--verbose\", help=\"Write log messages also to terminal.\", action=\"store_true\") visual = parser.add_argument_group(", "range(10) if 
not 10**i > 10 * dfs[\"index\"].max()] ax.set( xlabel='Run time (minutes)', yticks=np.log10(yticks),", "sorted bam file.\", nargs='+', metavar=\"files\", required=True) return parser.parse_args() def plot_retrotect(df, path, figformat=\"png\", title=None,", "+ 1), x_ci=None, fit_reg=False, color=\"blue\", scatter_kws={\"s\": 1}) aligned_df = dfs.drop('index', axis=1) \\ .dropna(axis=\"index\",", "required=True) return parser.parse_args() def plot_retrotect(df, path, figformat=\"png\", title=None, hours=8): dfs = check_valid_time_and_sort( df=df,", "from .version import __version__ from nanoplotter import check_valid_time_and_sort, Plot from os import path", "\"--threads\", help=\"Set the allowed number of threads to be used by the script\",", "'pgf', 'png', 'ps', 'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff']) visual.add_argument(\"--title\", help=\"Add a title", "plot in the graph\", type=int, default=8) target = parser.add_argument_group( title=\"Input data sources, requires", "Plot from os import path import seaborn as sns import matplotlib.pyplot as plt", "= check_valid_time_and_sort( df=df, timescol=\"start_time\", days=hours / 24, warning=False) dfs[\"start_time\"] = dfs[\"start_time\"].astype('timedelta64[m]') # ?!", "version and exit.\", action=\"version\", version='NanoComp {}'.format(__version__)) general.add_argument(\"-t\", \"--threads\", help=\"Set the allowed number of", "scatter_kws={\"s\": 1}) aligned_df = dfs.drop('index', axis=1) \\ .dropna(axis=\"index\", how=\"any\") \\ .reset_index(drop=True) \\ .reset_index()", "visual.add_argument(\"--title\", help=\"Add a title to all plots, requires quoting if using spaces\", type=str,", "title=None, hours=8): dfs = check_valid_time_and_sort( df=df, timescol=\"start_time\", days=hours / 24, warning=False) dfs[\"start_time\"] =", "default=None) visual.add_argument(\"--hours\", help=\"How many hours to plot in the graph\", type=int, default=8) target", "args.prefix), figformat=args.format, title=args.title, hours=args.hours) merged_df.dropna(axis=\"index\", how=\"any\").sort_values(by=\"start_time\").to_csv( path_or_buf=path.join(args.outdir, args.prefix) + \"Retrotect_details.txt.gz\", sep=\"\\t\", columns=[\"start_time\"], compression='gzip')", "args = get_args() merged_df = get_input(source=\"summary\", files=args.summary).set_index(\"readIDs\") \\ .merge(right=get_input(source=\"bam\", files=args.bam).set_index(\"readIDs\"), how=\"left\", left_index=True, right_index=True)", "in log transformed number of reads', title=title or cum_yield_reads.title) fig = ax.get_figure() cum_yield_reads.fig", "of threads to be used by the script\", default=4, type=int) general.add_argument(\"-o\", \"--outdir\", help=\"Specify", "albacore.\", nargs='+', metavar=\"files\", required=True) target.add_argument(\"--bam\", help=\"Data as a sorted bam file.\", nargs='+', metavar=\"files\",", "warning=False) dfs[\"start_time\"] = dfs[\"start_time\"].astype('timedelta64[m]') # ?! 
dtype float64 cum_yield_reads = Plot( path=path +", "path=path + \"CumulativeYieldPlot_NumberOfReads.\" + figformat, title=\"Cumulative yield\") ax = sns.regplot( x=dfs['start_time'], y=np.log10(dfs['index'] +", "help=\"How many hours to plot in the graph\", type=int, default=8) target = parser.add_argument_group(", "= ArgumentParser( description=\"Get detection curve of nanopore experiment.\", epilog=epilog, formatter_class=utils.custom_formatter, add_help=False) general =", "figformat=\"png\", title=None, hours=8): dfs = check_valid_time_and_sort( df=df, timescol=\"start_time\", days=hours / 24, warning=False) dfs[\"start_time\"]" ]
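# ----------------------------------------------------------------------------
# Usage sketch (not part of the script above): the y-axis trick in
# plot_retrotect() is to plot log10(cumulative_read_index + 1) and relabel the
# ticks by hand, which emulates a log scale while keeping sns.regplot happy.
# The demo below reproduces that with synthetic start times only; the real
# script feeds dataframes produced by nanoget.get_input(), so the data and
# output filename here are illustrative assumptions.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

rng = np.random.default_rng(42)
demo = pd.DataFrame({"start_time": np.sort(rng.uniform(0, 480, 1000))})  # minutes
demo = demo.reset_index()  # the "index" column counts cumulative reads

ax = sns.regplot(x=demo["start_time"], y=np.log10(demo["index"] + 1),
                 x_ci=None, fit_reg=False, color="blue", scatter_kws={"s": 1})
yticks = [10**i for i in range(10) if not 10**i > 10 * demo["index"].max()]
ax.set(xlabel="Run time (minutes)",
       yticks=np.log10(yticks), yticklabels=yticks,
       ylabel="Cumulative yield in log transformed number of reads")
ax.get_figure().savefig("demo_cumulative_yield.png", dpi=100, bbox_inches="tight")
plt.close("all")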
[ ".clinic import Clinic from .region_simplifier import RegionSimplifier from .decompiler import Decompiler from .decompilation_options", "from .region_identifier import RegionIdentifier from .structurer import Structurer from .structured_codegen import StructuredCodeGenerator from", "StructuredCodeGenerator from .clinic import Clinic from .region_simplifier import RegionSimplifier from .decompiler import Decompiler", ".structured_codegen import StructuredCodeGenerator from .clinic import Clinic from .region_simplifier import RegionSimplifier from .decompiler", ".region_identifier import RegionIdentifier from .structurer import Structurer from .structured_codegen import StructuredCodeGenerator from .clinic", "from .structurer import Structurer from .structured_codegen import StructuredCodeGenerator from .clinic import Clinic from", "from .region_simplifier import RegionSimplifier from .decompiler import Decompiler from .decompilation_options import options, options_by_category", "import RegionIdentifier from .structurer import Structurer from .structured_codegen import StructuredCodeGenerator from .clinic import", "<reponame>zhu8655/angr from .region_identifier import RegionIdentifier from .structurer import Structurer from .structured_codegen import StructuredCodeGenerator", ".structurer import Structurer from .structured_codegen import StructuredCodeGenerator from .clinic import Clinic from .region_simplifier", "import Clinic from .region_simplifier import RegionSimplifier from .decompiler import Decompiler from .decompilation_options import", "Structurer from .structured_codegen import StructuredCodeGenerator from .clinic import Clinic from .region_simplifier import RegionSimplifier", "import Structurer from .structured_codegen import StructuredCodeGenerator from .clinic import Clinic from .region_simplifier import", "from .structured_codegen import StructuredCodeGenerator from .clinic import Clinic from .region_simplifier import RegionSimplifier from", "import RegionSimplifier from .decompiler import Decompiler from .decompilation_options import options, options_by_category from .", "from .clinic import Clinic from .region_simplifier import RegionSimplifier from .decompiler import Decompiler from", "Clinic from .region_simplifier import RegionSimplifier from .decompiler import Decompiler from .decompilation_options import options,", "from .decompiler import Decompiler from .decompilation_options import options, options_by_category from . import optimization_passes", "import StructuredCodeGenerator from .clinic import Clinic from .region_simplifier import RegionSimplifier from .decompiler import", ".region_simplifier import RegionSimplifier from .decompiler import Decompiler from .decompilation_options import options, options_by_category from", "RegionIdentifier from .structurer import Structurer from .structured_codegen import StructuredCodeGenerator from .clinic import Clinic", "RegionSimplifier from .decompiler import Decompiler from .decompilation_options import options, options_by_category from . import" ]
[ "wait_c + ((eom & 4) == 4) * 10) time.sleep(wait) if wait_a >", "psgplayer): self.psgplayer = psgplayer # play sound by MML string # @param chA_MML", "import time class Sequencer: # initialize. # @param psgplayer ymz294.PSGPlayer instance def __init__(self,", "channel A # @param chB_MML a MML string for PSG channel B #", "< len(chC_seq): seq = chC_seq[index_c] wait_c = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq) index_c += 1", "4's A def playMML(self, chA_MML, chB_MML=\"\", chC_MML=\"\", core_freq=440): parser = mml.Parser(core_freq) chA_seq =", "> 0: wait_a -= wait if wait_b > 0: wait_b -= wait if", "seq[\"freq\"] else: #mute self.psgplayer.setMute(True, channel) #self.psgplayer.playSound(channel, 20000) return if seq[\"tie_slur\"] == False: env", "seq[\"tie_slur\"] == False: env = self.psgplayer.getEnvelopType() if env is not None and channel", "the octave 4's A def playMML(self, chA_MML, chB_MML=\"\", chC_MML=\"\", core_freq=440): parser = mml.Parser(core_freq)", "wait_c = 0 index_c = 0 eom = 0 #End of mml while(index_a", "self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C) eom |= 4 wait = min(wait_a + ((eom & 1) ==", "+ ((eom & 2) == 2) * 10, wait_c + ((eom & 4)", "((eom & 2) == 2) * 10, wait_c + ((eom & 4) ==", "1) * 10, wait_b + ((eom & 2) == 2) * 10, wait_c", "wait_c -= wait time.sleep(max(wait_a, wait_b, wait_c)) def __play_tone__(self, channel, seq): if seq[\"freq\"] !=", "@param core_freq frequency of the octave 4's A def playMML(self, chA_MML, chB_MML=\"\", chC_MML=\"\",", "parser.parse(chB_MML) chC_seq = parser.parse(chC_MML) wait_a = 0 index_a = 0 wait_b = 0", "#End of mml while(index_a < len(chA_seq) or index_b < len(chB_seq) or index_c <", "& 4) == 4) * 10) time.sleep(wait) if wait_a > 0: wait_a -=", "len(chC_seq): seq = chC_seq[index_c] wait_c = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq) index_c += 1 else:", "# play sound by MML string # @param chA_MML a MML string for", "chB_MML=\"\", chC_MML=\"\", core_freq=440): parser = mml.Parser(core_freq) chA_seq = parser.parse(chA_MML) chB_seq = parser.parse(chB_MML) chC_seq", "channel C # @param core_freq frequency of the octave 4's A def playMML(self,", "mml while(index_a < len(chA_seq) or index_b < len(chB_seq) or index_c < len(chC_seq)): if", "a MML string for PSG channel C # @param core_freq frequency of the", "seq) index_c += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C) eom |= 4 wait = min(wait_a", "> 0: wait_b -= wait if wait_c > 0: wait_c -= wait time.sleep(max(wait_a,", "@param chB_MML a MML string for PSG channel B # @param chC_MML a", "ymz294.PSGPlayer.CHANNEL_B) eom |= 2 if wait_c <= 0: if index_c < len(chC_seq): seq", "time.sleep(wait) if wait_a > 0: wait_a -= wait if wait_b > 0: wait_b", "for PSG channel A # @param chB_MML a MML string for PSG channel", "= psgplayer # play sound by MML string # @param chA_MML a MML", "psgplayer ymz294.PSGPlayer instance def __init__(self, psgplayer): self.psgplayer = psgplayer # play sound by", "wait_a = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq) index_a += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A) eom |=", "chA_MML a MML string for PSG channel A # @param chB_MML a MML", "= seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq) index_b += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B) eom |= 2", "PSG channel B # @param chC_MML a MML string for PSG channel C", "ymz294.PSGPlayer.CHANNEL_C) eom |= 4 wait = min(wait_a + ((eom & 
1) == 1)", "else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C) eom |= 4 wait = min(wait_a + ((eom & 1)", "seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq) index_a += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A) eom |= 1 if", "seq): if seq[\"freq\"] != 0: self.psgplayer.setMute(False, channel) self.psgplayer.playSound(channel, seq[\"freq\"]) #print seq[\"freq\"] else: #mute", "0 index_c = 0 eom = 0 #End of mml while(index_a < len(chA_seq)", "@param psgplayer ymz294.PSGPlayer instance def __init__(self, psgplayer): self.psgplayer = psgplayer # play sound", "10, wait_c + ((eom & 4) == 4) * 10) time.sleep(wait) if wait_a", "< len(chA_seq): seq = chA_seq[index_a] wait_a = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq) index_a += 1", "4) == 4) * 10) time.sleep(wait) if wait_a > 0: wait_a -= wait", "if wait_c <= 0: if index_c < len(chC_seq): seq = chC_seq[index_c] wait_c =", "= 0 wait_c = 0 index_c = 0 eom = 0 #End of", "parser.parse(chA_MML) chB_seq = parser.parse(chB_MML) chC_seq = parser.parse(chC_MML) wait_a = 0 index_a = 0", "<= 0: if index_c < len(chC_seq): seq = chC_seq[index_c] wait_c = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C,", "else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A) eom |= 1 if wait_b <= 0: if index_b <", "self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B) eom |= 2 if wait_c <= 0: if index_c < len(chC_seq):", "# @param psgplayer ymz294.PSGPlayer instance def __init__(self, psgplayer): self.psgplayer = psgplayer # play", "chB_MML a MML string for PSG channel B # @param chC_MML a MML", "B # @param chC_MML a MML string for PSG channel C # @param", "1) == 1) * 10, wait_b + ((eom & 2) == 2) *", "return if seq[\"tie_slur\"] == False: env = self.psgplayer.getEnvelopType() if env is not None", "if index_b < len(chB_seq): seq = chB_seq[index_b] wait_b = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq) index_b", "instance def __init__(self, psgplayer): self.psgplayer = psgplayer # play sound by MML string", "index_c < len(chC_seq)): if wait_a <= 0: if index_a < len(chA_seq): seq =", "== 2) * 10, wait_c + ((eom & 4) == 4) * 10)", "& 1) == 1) * 10, wait_b + ((eom & 2) == 2)", "self.psgplayer.playSound(channel, seq[\"freq\"]) #print seq[\"freq\"] else: #mute self.psgplayer.setMute(True, channel) #self.psgplayer.playSound(channel, 20000) return if seq[\"tie_slur\"]", "eom = 0 #End of mml while(index_a < len(chA_seq) or index_b < len(chB_seq)", "wait_b <= 0: if index_b < len(chB_seq): seq = chB_seq[index_b] wait_b = seq[\"duration\"]", "core_freq=440): parser = mml.Parser(core_freq) chA_seq = parser.parse(chA_MML) chB_seq = parser.parse(chB_MML) chC_seq = parser.parse(chC_MML)", "string for PSG channel C # @param core_freq frequency of the octave 4's", "wait = min(wait_a + ((eom & 1) == 1) * 10, wait_b +", "+ ((eom & 4) == 4) * 10) time.sleep(wait) if wait_a > 0:", "wait_a <= 0: if index_a < len(chA_seq): seq = chA_seq[index_a] wait_a = seq[\"duration\"]", "coding:utf-8 -*- import ymz294 import mml import time class Sequencer: # initialize. 
#", "wait_b = 0 index_b = 0 wait_c = 0 index_c = 0 eom", "seq = chC_seq[index_c] wait_c = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq) index_c += 1 else: self.psgplayer.setMute(True,", "< len(chB_seq): seq = chB_seq[index_b] wait_b = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq) index_b += 1", "0: if index_a < len(chA_seq): seq = chA_seq[index_a] wait_a = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq)", "wait if wait_b > 0: wait_b -= wait if wait_c > 0: wait_c", "#print seq[\"freq\"] else: #mute self.psgplayer.setMute(True, channel) #self.psgplayer.playSound(channel, 20000) return if seq[\"tie_slur\"] == False:", "== False: env = self.psgplayer.getEnvelopType() if env is not None and channel ==", "eom |= 4 wait = min(wait_a + ((eom & 1) == 1) *", "string for PSG channel B # @param chC_MML a MML string for PSG", "string for PSG channel A # @param chB_MML a MML string for PSG", "chA_seq = parser.parse(chA_MML) chB_seq = parser.parse(chB_MML) chC_seq = parser.parse(chC_MML) wait_a = 0 index_a", "ymz294.PSGPlayer.CHANNEL_A) eom |= 1 if wait_b <= 0: if index_b < len(chB_seq): seq", "MML string for PSG channel A # @param chB_MML a MML string for", "= parser.parse(chA_MML) chB_seq = parser.parse(chB_MML) chC_seq = parser.parse(chC_MML) wait_a = 0 index_a =", "<= 0: if index_b < len(chB_seq): seq = chB_seq[index_b] wait_b = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B,", "2 if wait_c <= 0: if index_c < len(chC_seq): seq = chC_seq[index_c] wait_c", "== 1) * 10, wait_b + ((eom & 2) == 2) * 10,", "of mml while(index_a < len(chA_seq) or index_b < len(chB_seq) or index_c < len(chC_seq)):", "len(chC_seq)): if wait_a <= 0: if index_a < len(chA_seq): seq = chA_seq[index_a] wait_a", "len(chB_seq): seq = chB_seq[index_b] wait_b = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq) index_b += 1 else:", "= min(wait_a + ((eom & 1) == 1) * 10, wait_b + ((eom", "channel) #self.psgplayer.playSound(channel, 20000) return if seq[\"tie_slur\"] == False: env = self.psgplayer.getEnvelopType() if env", "wait_c = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq) index_c += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C) eom |=", "self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq) index_c += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C) eom |= 4 wait =", "index_a += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A) eom |= 1 if wait_b <= 0:", "seq[\"freq\"] != 0: self.psgplayer.setMute(False, channel) self.psgplayer.playSound(channel, seq[\"freq\"]) #print seq[\"freq\"] else: #mute self.psgplayer.setMute(True, channel)", "frequency of the octave 4's A def playMML(self, chA_MML, chB_MML=\"\", chC_MML=\"\", core_freq=440): parser", "channel B # @param chC_MML a MML string for PSG channel C #", "@param chC_MML a MML string for PSG channel C # @param core_freq frequency", "= seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq) index_c += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C) eom |= 4", "ymz294.PSGPlayer instance def __init__(self, psgplayer): self.psgplayer = psgplayer # play sound by MML", "< len(chA_seq) or index_b < len(chB_seq) or index_c < len(chC_seq)): if wait_a <=", "* 10, wait_b + ((eom & 2) == 2) * 10, wait_c +", "20000) return if seq[\"tie_slur\"] == False: env = self.psgplayer.getEnvelopType() if env is not", "if seq[\"tie_slur\"] == False: env = self.psgplayer.getEnvelopType() if env is not 
None and", "if seq[\"freq\"] != 0: self.psgplayer.setMute(False, channel) self.psgplayer.playSound(channel, seq[\"freq\"]) #print seq[\"freq\"] else: #mute self.psgplayer.setMute(True,", "seq = chB_seq[index_b] wait_b = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq) index_b += 1 else: self.psgplayer.setMute(True,", "if wait_a > 0: wait_a -= wait if wait_b > 0: wait_b -=", "MML string for PSG channel B # @param chC_MML a MML string for", "# @param core_freq frequency of the octave 4's A def playMML(self, chA_MML, chB_MML=\"\",", "0 #End of mml while(index_a < len(chA_seq) or index_b < len(chB_seq) or index_c", "((eom & 4) == 4) * 10) time.sleep(wait) if wait_a > 0: wait_a", "wait_c <= 0: if index_c < len(chC_seq): seq = chC_seq[index_c] wait_c = seq[\"duration\"]", "Sequencer: # initialize. # @param psgplayer ymz294.PSGPlayer instance def __init__(self, psgplayer): self.psgplayer =", "self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq) index_b += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B) eom |= 2 if wait_c", "+= 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B) eom |= 2 if wait_c <= 0: if", "seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq) index_c += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C) eom |= 4 wait", "if wait_b > 0: wait_b -= wait if wait_c > 0: wait_c -=", "len(chA_seq): seq = chA_seq[index_a] wait_a = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq) index_a += 1 else:", "time.sleep(max(wait_a, wait_b, wait_c)) def __play_tone__(self, channel, seq): if seq[\"freq\"] != 0: self.psgplayer.setMute(False, channel)", "wait if wait_c > 0: wait_c -= wait time.sleep(max(wait_a, wait_b, wait_c)) def __play_tone__(self,", "__init__(self, psgplayer): self.psgplayer = psgplayer # play sound by MML string # @param", "= 0 eom = 0 #End of mml while(index_a < len(chA_seq) or index_b", "= seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq) index_a += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A) eom |= 1", "0 wait_b = 0 index_b = 0 wait_c = 0 index_c = 0", "playMML(self, chA_MML, chB_MML=\"\", chC_MML=\"\", core_freq=440): parser = mml.Parser(core_freq) chA_seq = parser.parse(chA_MML) chB_seq =", "-*- coding:utf-8 -*- import ymz294 import mml import time class Sequencer: # initialize.", "@param chA_MML a MML string for PSG channel A # @param chB_MML a", "MML string # @param chA_MML a MML string for PSG channel A #", "chC_MML=\"\", core_freq=440): parser = mml.Parser(core_freq) chA_seq = parser.parse(chA_MML) chB_seq = parser.parse(chB_MML) chC_seq =", "PSG channel A # @param chB_MML a MML string for PSG channel B", "len(chB_seq) or index_c < len(chC_seq)): if wait_a <= 0: if index_a < len(chA_seq):", "seq) index_a += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A) eom |= 1 if wait_b <=", "= 0 index_b = 0 wait_c = 0 index_c = 0 eom =", "1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A) eom |= 1 if wait_b <= 0: if index_b", "class Sequencer: # initialize. 
# @param psgplayer ymz294.PSGPlayer instance def __init__(self, psgplayer): self.psgplayer", "channel, seq): if seq[\"freq\"] != 0: self.psgplayer.setMute(False, channel) self.psgplayer.playSound(channel, seq[\"freq\"]) #print seq[\"freq\"] else:", "or index_c < len(chC_seq)): if wait_a <= 0: if index_a < len(chA_seq): seq", "chC_seq[index_c] wait_c = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq) index_c += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C) eom", "= 0 #End of mml while(index_a < len(chA_seq) or index_b < len(chB_seq) or", "== 4) * 10) time.sleep(wait) if wait_a > 0: wait_a -= wait if", "# @param chB_MML a MML string for PSG channel B # @param chC_MML", "0: self.psgplayer.setMute(False, channel) self.psgplayer.playSound(channel, seq[\"freq\"]) #print seq[\"freq\"] else: #mute self.psgplayer.setMute(True, channel) #self.psgplayer.playSound(channel, 20000)", "#mute self.psgplayer.setMute(True, channel) #self.psgplayer.playSound(channel, 20000) return if seq[\"tie_slur\"] == False: env = self.psgplayer.getEnvelopType()", "2) == 2) * 10, wait_c + ((eom & 4) == 4) *", "parser = mml.Parser(core_freq) chA_seq = parser.parse(chA_MML) chB_seq = parser.parse(chB_MML) chC_seq = parser.parse(chC_MML) wait_a", "* 10, wait_c + ((eom & 4) == 4) * 10) time.sleep(wait) if", "# @param chA_MML a MML string for PSG channel A # @param chB_MML", "|= 1 if wait_b <= 0: if index_b < len(chB_seq): seq = chB_seq[index_b]", "mml import time class Sequencer: # initialize. # @param psgplayer ymz294.PSGPlayer instance def", "wait time.sleep(max(wait_a, wait_b, wait_c)) def __play_tone__(self, channel, seq): if seq[\"freq\"] != 0: self.psgplayer.setMute(False,", "= chB_seq[index_b] wait_b = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq) index_b += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B)", "#self.psgplayer.playSound(channel, 20000) return if seq[\"tie_slur\"] == False: env = self.psgplayer.getEnvelopType() if env is", "|= 4 wait = min(wait_a + ((eom & 1) == 1) * 10,", "seq[\"freq\"]) #print seq[\"freq\"] else: #mute self.psgplayer.setMute(True, channel) #self.psgplayer.playSound(channel, 20000) return if seq[\"tie_slur\"] ==", "index_c = 0 eom = 0 #End of mml while(index_a < len(chA_seq) or", "MML string for PSG channel C # @param core_freq frequency of the octave", "by MML string # @param chA_MML a MML string for PSG channel A", "index_c += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C) eom |= 4 wait = min(wait_a +", "= parser.parse(chC_MML) wait_a = 0 index_a = 0 wait_b = 0 index_b =", "0 index_b = 0 wait_c = 0 index_c = 0 eom = 0", "= 0 wait_b = 0 index_b = 0 wait_c = 0 index_c =", "len(chA_seq) or index_b < len(chB_seq) or index_c < len(chC_seq)): if wait_a <= 0:", "< len(chB_seq) or index_c < len(chC_seq)): if wait_a <= 0: if index_a <", "< len(chC_seq)): if wait_a <= 0: if index_a < len(chA_seq): seq = chA_seq[index_a]", "eom |= 1 if wait_b <= 0: if index_b < len(chB_seq): seq =", "index_b < len(chB_seq): seq = chB_seq[index_b] wait_b = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq) index_b +=", "seq) index_b += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B) eom |= 2 if wait_c <=", "self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A) eom |= 1 if wait_b <= 0: if index_b < len(chB_seq):", "|= 2 if wait_c <= 0: if index_c < len(chC_seq): seq = chC_seq[index_c]", "self.psgplayer.setMute(True, channel) #self.psgplayer.playSound(channel, 20000) return if 
seq[\"tie_slur\"] == False: env = self.psgplayer.getEnvelopType() if", "parser.parse(chC_MML) wait_a = 0 index_a = 0 wait_b = 0 index_b = 0", "= parser.parse(chB_MML) chC_seq = parser.parse(chC_MML) wait_a = 0 index_a = 0 wait_b =", "a MML string for PSG channel A # @param chB_MML a MML string", "wait_a = 0 index_a = 0 wait_b = 0 index_b = 0 wait_c", "seq = chA_seq[index_a] wait_a = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq) index_a += 1 else: self.psgplayer.setMute(True,", "= chC_seq[index_c] wait_c = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq) index_c += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C)", "10, wait_b + ((eom & 2) == 2) * 10, wait_c + ((eom", "env = self.psgplayer.getEnvelopType() if env is not None and channel == ymz294.PSGPlayer.CHANNEL_A: self.psgplayer.setEnvelopType(env)", "-*- import ymz294 import mml import time class Sequencer: # initialize. # @param", "play sound by MML string # @param chA_MML a MML string for PSG", "A # @param chB_MML a MML string for PSG channel B # @param", "wait_a -= wait if wait_b > 0: wait_b -= wait if wait_c >", "wait_b -= wait if wait_c > 0: wait_c -= wait time.sleep(max(wait_a, wait_b, wait_c))", "= 0 index_c = 0 eom = 0 #End of mml while(index_a <", "index_b += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B) eom |= 2 if wait_c <= 0:", "min(wait_a + ((eom & 1) == 1) * 10, wait_b + ((eom &", "1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B) eom |= 2 if wait_c <= 0: if index_c", "mml.Parser(core_freq) chA_seq = parser.parse(chA_MML) chB_seq = parser.parse(chB_MML) chC_seq = parser.parse(chC_MML) wait_a = 0", "0: if index_b < len(chB_seq): seq = chB_seq[index_b] wait_b = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq)", "for PSG channel C # @param core_freq frequency of the octave 4's A", "wait_b > 0: wait_b -= wait if wait_c > 0: wait_c -= wait", "wait_a > 0: wait_a -= wait if wait_b > 0: wait_b -= wait", "__play_tone__(self, channel, seq): if seq[\"freq\"] != 0: self.psgplayer.setMute(False, channel) self.psgplayer.playSound(channel, seq[\"freq\"]) #print seq[\"freq\"]", "> 0: wait_c -= wait time.sleep(max(wait_a, wait_b, wait_c)) def __play_tone__(self, channel, seq): if", "0 wait_c = 0 index_c = 0 eom = 0 #End of mml", "10) time.sleep(wait) if wait_a > 0: wait_a -= wait if wait_b > 0:", "chC_MML a MML string for PSG channel C # @param core_freq frequency of", "index_b = 0 wait_c = 0 index_c = 0 eom = 0 #End", "wait_c)) def __play_tone__(self, channel, seq): if seq[\"freq\"] != 0: self.psgplayer.setMute(False, channel) self.psgplayer.playSound(channel, seq[\"freq\"])", "+ ((eom & 1) == 1) * 10, wait_b + ((eom & 2)", "chB_seq[index_b] wait_b = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq) index_b += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B) eom", "if wait_a <= 0: if index_a < len(chA_seq): seq = chA_seq[index_a] wait_a =", "-= wait time.sleep(max(wait_a, wait_b, wait_c)) def __play_tone__(self, channel, seq): if seq[\"freq\"] != 0:", "else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B) eom |= 2 if wait_c <= 0: if index_c <", "* 10) time.sleep(wait) if wait_a > 0: wait_a -= wait if wait_b >", "core_freq frequency of the octave 4's A def playMML(self, chA_MML, chB_MML=\"\", chC_MML=\"\", core_freq=440):", "0: if index_c < len(chC_seq): seq = chC_seq[index_c] wait_c = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq)", "0: wait_b -= wait if 
wait_c > 0: wait_c -= wait time.sleep(max(wait_a, wait_b,", "# @param chC_MML a MML string for PSG channel C # @param core_freq", "1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C) eom |= 4 wait = min(wait_a + ((eom &", "def __init__(self, psgplayer): self.psgplayer = psgplayer # play sound by MML string #", "channel) self.psgplayer.playSound(channel, seq[\"freq\"]) #print seq[\"freq\"] else: #mute self.psgplayer.setMute(True, channel) #self.psgplayer.playSound(channel, 20000) return if", "def playMML(self, chA_MML, chB_MML=\"\", chC_MML=\"\", core_freq=440): parser = mml.Parser(core_freq) chA_seq = parser.parse(chA_MML) chB_seq", "wait_c > 0: wait_c -= wait time.sleep(max(wait_a, wait_b, wait_c)) def __play_tone__(self, channel, seq):", "-= wait if wait_b > 0: wait_b -= wait if wait_c > 0:", "+= 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C) eom |= 4 wait = min(wait_a + ((eom", "wait_b, wait_c)) def __play_tone__(self, channel, seq): if seq[\"freq\"] != 0: self.psgplayer.setMute(False, channel) self.psgplayer.playSound(channel,", "self.psgplayer = psgplayer # play sound by MML string # @param chA_MML a", "((eom & 1) == 1) * 10, wait_b + ((eom & 2) ==", "0 index_a = 0 wait_b = 0 index_b = 0 wait_c = 0", "index_a < len(chA_seq): seq = chA_seq[index_a] wait_a = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq) index_a +=", "C # @param core_freq frequency of the octave 4's A def playMML(self, chA_MML,", "index_b < len(chB_seq) or index_c < len(chC_seq)): if wait_a <= 0: if index_a", "string # @param chA_MML a MML string for PSG channel A # @param", "PSG channel C # @param core_freq frequency of the octave 4's A def", "initialize. # @param psgplayer ymz294.PSGPlayer instance def __init__(self, psgplayer): self.psgplayer = psgplayer #", "psgplayer # play sound by MML string # @param chA_MML a MML string", "# initialize. # @param psgplayer ymz294.PSGPlayer instance def __init__(self, psgplayer): self.psgplayer = psgplayer", "sound by MML string # @param chA_MML a MML string for PSG channel", "chB_seq = parser.parse(chB_MML) chC_seq = parser.parse(chC_MML) wait_a = 0 index_a = 0 wait_b", "ymz294 import mml import time class Sequencer: # initialize. # @param psgplayer ymz294.PSGPlayer", "4) * 10) time.sleep(wait) if wait_a > 0: wait_a -= wait if wait_b", "self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq) index_a += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A) eom |= 1 if wait_b", "eom |= 2 if wait_c <= 0: if index_c < len(chC_seq): seq =", "# -*- coding:utf-8 -*- import ymz294 import mml import time class Sequencer: #", "else: #mute self.psgplayer.setMute(True, channel) #self.psgplayer.playSound(channel, 20000) return if seq[\"tie_slur\"] == False: env =", "1 if wait_b <= 0: if index_b < len(chB_seq): seq = chB_seq[index_b] wait_b", "import ymz294 import mml import time class Sequencer: # initialize. # @param psgplayer", "if index_c < len(chC_seq): seq = chC_seq[index_c] wait_c = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq) index_c", "if wait_c > 0: wait_c -= wait time.sleep(max(wait_a, wait_b, wait_c)) def __play_tone__(self, channel,", "time class Sequencer: # initialize. 
# @param psgplayer ymz294.PSGPlayer instance def __init__(self, psgplayer):", "chA_MML, chB_MML=\"\", chC_MML=\"\", core_freq=440): parser = mml.Parser(core_freq) chA_seq = parser.parse(chA_MML) chB_seq = parser.parse(chB_MML)", "= chA_seq[index_a] wait_a = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq) index_a += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A)", "-= wait if wait_c > 0: wait_c -= wait time.sleep(max(wait_a, wait_b, wait_c)) def", "!= 0: self.psgplayer.setMute(False, channel) self.psgplayer.playSound(channel, seq[\"freq\"]) #print seq[\"freq\"] else: #mute self.psgplayer.setMute(True, channel) #self.psgplayer.playSound(channel,", "0 eom = 0 #End of mml while(index_a < len(chA_seq) or index_b <", "False: env = self.psgplayer.getEnvelopType() if env is not None and channel == ymz294.PSGPlayer.CHANNEL_A:", "0: wait_a -= wait if wait_b > 0: wait_b -= wait if wait_c", "& 2) == 2) * 10, wait_c + ((eom & 4) == 4)", "<= 0: if index_a < len(chA_seq): seq = chA_seq[index_a] wait_a = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A,", "2) * 10, wait_c + ((eom & 4) == 4) * 10) time.sleep(wait)", "for PSG channel B # @param chC_MML a MML string for PSG channel", "+= 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A) eom |= 1 if wait_b <= 0: if", "or index_b < len(chB_seq) or index_c < len(chC_seq)): if wait_a <= 0: if", "chA_seq[index_a] wait_a = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq) index_a += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A) eom", "0: wait_c -= wait time.sleep(max(wait_a, wait_b, wait_c)) def __play_tone__(self, channel, seq): if seq[\"freq\"]", "def __play_tone__(self, channel, seq): if seq[\"freq\"] != 0: self.psgplayer.setMute(False, channel) self.psgplayer.playSound(channel, seq[\"freq\"]) #print", "= mml.Parser(core_freq) chA_seq = parser.parse(chA_MML) chB_seq = parser.parse(chB_MML) chC_seq = parser.parse(chC_MML) wait_a =", "chC_seq = parser.parse(chC_MML) wait_a = 0 index_a = 0 wait_b = 0 index_b", "if index_a < len(chA_seq): seq = chA_seq[index_a] wait_a = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq) index_a", "= 0 index_a = 0 wait_b = 0 index_b = 0 wait_c =", "self.psgplayer.setMute(False, channel) self.psgplayer.playSound(channel, seq[\"freq\"]) #print seq[\"freq\"] else: #mute self.psgplayer.setMute(True, channel) #self.psgplayer.playSound(channel, 20000) return", "a MML string for PSG channel B # @param chC_MML a MML string", "wait_b = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq) index_b += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B) eom |=", "index_c < len(chC_seq): seq = chC_seq[index_c] wait_c = seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq) index_c +=", "of the octave 4's A def playMML(self, chA_MML, chB_MML=\"\", chC_MML=\"\", core_freq=440): parser =", "wait_b + ((eom & 2) == 2) * 10, wait_c + ((eom &", "import mml import time class Sequencer: # initialize. 
# @param psgplayer ymz294.PSGPlayer instance", "index_a = 0 wait_b = 0 index_b = 0 wait_c = 0 index_c", "octave 4's A def playMML(self, chA_MML, chB_MML=\"\", chC_MML=\"\", core_freq=440): parser = mml.Parser(core_freq) chA_seq", "while(index_a < len(chA_seq) or index_b < len(chB_seq) or index_c < len(chC_seq)): if wait_a", "A def playMML(self, chA_MML, chB_MML=\"\", chC_MML=\"\", core_freq=440): parser = mml.Parser(core_freq) chA_seq = parser.parse(chA_MML)", "4 wait = min(wait_a + ((eom & 1) == 1) * 10, wait_b", "if wait_b <= 0: if index_b < len(chB_seq): seq = chB_seq[index_b] wait_b =", "seq[\"duration\"] self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq) index_b += 1 else: self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B) eom |= 2 if" ]
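# Usage sketch (added for illustration; not part of the original file). The
# PSGPlayer constructor arguments and the exact MML vocabulary accepted by
# mml.Parser are assumptions, not confirmed API.
def _demo_play_mml():
    player = ymz294.PSGPlayer()  # assumption: a no-argument constructor
    seq = Sequencer(player)
    # Channel A carries a scale, channel B a lower line; core_freq=440 tunes
    # octave 4's A, matching the playMML() default.
    seq.playMML("T120O4CDEFGAB", chB_MML="T120O3CEGCEG", core_freq=440)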
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

"""Jupyter support for Pants projects and PEX files."""  # N.B.: Flit uses this as our distribution description.

__version__ = "0.0.4"  # N.B.: Flit uses this as our distribution version.

from IPython import InteractiveShell

from .plugin import _PexEnvironmentBootstrapper


def load_ipython_extension(ipython: InteractiveShell) -> None:
    ipython.register_magics(_PexEnvironmentBootstrapper)
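# Usage sketch (illustration only). load_ipython_extension() is the standard
# IPython extension hook, so in a Jupyter or IPython session the plugin is
# activated with the built-in %load_ext magic; the importable module name
# pants_jupyter_plugin is assumed from the package described above.
#
#     %load_ext pants_jupyter_plugin
#
# After that, the magics registered by _PexEnvironmentBootstrapper are
# available in the session.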
import random

import arcade

from ant import Ant
from colony import Colony

# TODO
# - Food blobs 2x as big
# - Drop food blobs with the mouse
# - Food blob coordinate is always the central coordinate
# - Draw a line when backtracking
from settings import settings


class Arena(arcade.Window):
    def __init__(self, width, height, title, generation_callback=None):
        super().__init__(width, height, title)
        self.wall_list = arcade.SpriteList(is_static=True, use_spatial_hash=True)
        self.food_list = arcade.SpriteList(is_static=True, use_spatial_hash=True)
        self.ant_list = arcade.SpriteList(use_spatial_hash=False)
        self.physics_engine = None
        if settings.MAX_FPS:
            self.set_update_rate(1 / settings.MAX_FPS)
        self.actual_fps = settings.MAX_FPS  # Initialize to something
        self.generation = 0
        self.generation_callback = generation_callback  # For testing purposes

    def setup(self):
        if settings.DRAW_BASE:
            self.create_base()
        for _ in range(settings.NUM_WALLS):
            self.create_wall()
        for _ in range(settings.NUM_FOOD_BLOBS):
            self.create_food_blob(settings.FOOD_BLOB_SIZE)
        self.colony = Colony()
        for _ in range(settings.NUM_ANTS):
            ant = Ant(
                settings.SCREEN_WIDTH / 2, 0, self, self.colony, scale=settings.SCALE
            )
            self.ant_list.append(ant)
        arcade.set_background_color(settings.FIELD_COLOR)
        if self.generation_callback:
            self.generation_callback(self.generation, self)

    def create_base(self):
        x = settings.SCREEN_WIDTH / 2
        for y in range(0, round(20 * settings.SCALE), settings.WALL_THICKNESS()):
            block = arcade.SpriteSolidColor(
                settings.WALL_THICKNESS(),
                settings.WALL_THICKNESS(),
                settings.BASE_COLOR,
            )
            block.center_x = x - 8 * settings.SCALE
            block.center_y = y
            self.wall_list.append(block)
            block = arcade.SpriteSolidColor(
                settings.WALL_THICKNESS(),
                settings.WALL_THICKNESS(),
                settings.BASE_COLOR,
            )
            block.center_x = x + 8 * settings.SCALE
            block.center_y = y
            self.wall_list.append(block)

    def create_wall(self):
        def block_at(x, y):
            block = arcade.SpriteSolidColor(
                settings.WALL_THICKNESS(),
                settings.WALL_THICKNESS(),
                settings.WALL_COLOR,
            )
            block.center_x = x
            block.center_y = y
            wally.append(block)

        while True:
            wally = []
            length = random.randint(settings.WALL_MIN(), settings.WALL_MAX())
            if random.random() < 0.5:  # Horizontal
                start_x = random.randint(0, settings.SCREEN_WIDTH - length)
                y = random.randint(0, settings.SCREEN_HEIGHT)
                for x in range(start_x, start_x + length, settings.WALL_THICKNESS()):
                    block_at(x, y)
            else:  # Vertical
                start_y = random.randint(0, settings.SCREEN_HEIGHT - length)
                x = random.randint(0, settings.SCREEN_WIDTH)
                for y in range(start_y, start_y + length, settings.WALL_THICKNESS()):
                    block_at(x, y)
            for block in wally:
                if arcade.check_for_collision_with_list(block, self.wall_list):
                    break  # Oops, break it off, try a new wall
            else:
                for block in wally:
                    self.wall_list.append(block)
                return

    def create_food_blob(self, size=10, start_coo=None):
        scale = settings.SCALE * 3
        if start_coo:
            start_x, start_y = start_coo
        else:
            start_x = random.randint(0, settings.SCREEN_WIDTH - size * scale)
            start_y = random.randint(0, settings.SCREEN_HEIGHT - size * scale)
        for x in range(start_x, start_x + size * scale, scale):
            for y in range(start_y, start_y + size * scale, scale):
                block = arcade.SpriteSolidColor(scale, scale, settings.FOOD_COLOR)
                block.center_x = x
                block.center_y = y
                if not arcade.check_for_collision_with_list(block, self.wall_list):
                    self.food_list.append(block)

    def on_draw(self):
        # This command has to happen before we start drawing
        arcade.start_render()
        # Draw all the sprites.
        self.wall_list.draw()
        self.food_list.draw()
        for ant in self.ant_list:
            ant.draw()
            # ant.draw_hit_box((255,0,0))

    # def on_key_press(self, key, modifiers):
    #     """Called whenever a key is pressed. """
    #
    #     if key == arcade.key.UP:
    #         self.player_sprite.change_y = MOVEMENT_SPEED
    #     elif key == arcade.key.DOWN:
    #         self.player_sprite.change_y = -MOVEMENT_SPEED
    #     elif key == arcade.key.LEFT:
    #         self.player_sprite.change_x = -MOVEMENT_SPEED
    #     elif key == arcade.key.RIGHT:
    #         self.player_sprite.change_x = MOVEMENT_SPEED
    #
    # def on_key_release(self, key, modifiers):
    #     """Called when the user releases a key. """
    #
    #     if key == arcade.key.UP or key == arcade.key.DOWN:
    #         self.player_sprite.change_y = 0
    #     elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
    #         self.player_sprite.change_x = 0

    def on_update(self, delta_time):
        self.colony.tick()
        self.actual_fps = (99 * self.actual_fps + 1 / delta_time) / 100
        food_per_100_turns = self.colony.food_per_turn() * 100
        self.set_caption(
            f"{settings.SCREEN_TITLE} - {self.actual_fps:0.0f} fps, "
            f"{food_per_100_turns:0.0f} food per 100 turns - {self.generation}"
        )
        arcade.start_render()
        for ant in self.ant_list:
            ant.move()
        self.generation += 1  #!! Double bookkeeping next to colony.tick()
        if self.generation_callback:
            self.generation_callback(self.generation, self)


if __name__ == "__main__":
    window = Arena(settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT, settings.SCREEN_TITLE)
    window.setup()
    arcade.run()
\"\"\"", "settings import settings class Arena(arcade.Window): def __init__(self, width, height, title, generation_callback=None): super().__init__(width, height,", "key == arcade.key.UP or key == arcade.key.DOWN: # self.player_sprite.change_y = 0 # elif", "in self.ant_list: ant.draw() # ant.draw_hit_box((255,0,0)) # def on_key_press(self, key, modifiers): # \"\"\"Called whenever", "y in range(0, round(20 * settings.SCALE), settings.WALL_THICKNESS()): block = arcade.SpriteSolidColor( settings.WALL_THICKNESS(), settings.WALL_THICKNESS(), settings.BASE_COLOR,", "# elif key == arcade.key.LEFT: # self.player_sprite.change_x = -MOVEMENT_SPEED # elif key ==", "if random.random() < 0.5: # Horizontal start_x = random.randint(0, settings.SCREEN_WIDTH - length) y", "* settings.SCALE block.center_y = y self.wall_list.append(block) def create_wall(self): def block_at(x, y): block =", "wally: self.wall_list.append(block) return def create_food_blob(self, size=10, start_coo=None): scale = settings.SCALE * 3 if", "= arcade.SpriteList(is_static=True, use_spatial_hash=True) self.food_list = arcade.SpriteList(is_static=True, use_spatial_hash=True) self.ant_list = arcade.SpriteList(use_spatial_hash=False) self.physics_engine = None", "* settings.SCALE), settings.WALL_THICKNESS()): block = arcade.SpriteSolidColor( settings.WALL_THICKNESS(), settings.WALL_THICKNESS(), settings.BASE_COLOR, ) block.center_x = x", "0 self.generation_callback = generation_callback # For testing purposes def setup(self): if settings.DRAW_BASE: self.create_base()", "random.randint(0, settings.SCREEN_HEIGHT) for x in range(start_x, start_x + length, settings.WALL_THICKNESS()): block_at(x, y) else:", "f\"{settings.SCREEN_TITLE} - {self.actual_fps:0.0f} fps, {food_per_100_turns:0.0f} food per 100 turns - {self.generation}\" ) arcade.start_render()", "start_x + length, settings.WALL_THICKNESS()): block_at(x, y) else: # Vertical start_y = random.randint(0, settings.SCREEN_HEIGHT", "settings.WALL_THICKNESS(), settings.WALL_THICKNESS(), settings.WALL_COLOR, ) block.center_x = x block.center_y = y wally.append(block) while True:", "in range(settings.NUM_FOOD_BLOBS): self.create_food_blob(settings.FOOD_BLOB_SIZE) self.colony = Colony() for _ in range(settings.NUM_ANTS): ant = Ant(", "settings class Arena(arcade.Window): def __init__(self, width, height, title, generation_callback=None): super().__init__(width, height, title) self.wall_list", "y in range(start_y, start_y + size * scale, scale): block = arcade.SpriteSolidColor(scale, scale,", "= None if settings.MAX_FPS: self.set_update_rate(1 / settings.MAX_FPS) self.actual_fps = settings.MAX_FPS # Initializse to", "for ant in self.ant_list: ant.move() self.generation += 1 #!! 
Dubbel naast colony.tick() if", "self.generation_callback: self.generation_callback(self.generation, self) def create_base(self): x = settings.SCREEN_WIDTH / 2 for y in", "arcade.SpriteList(is_static=True, use_spatial_hash=True) self.food_list = arcade.SpriteList(is_static=True, use_spatial_hash=True) self.ant_list = arcade.SpriteList(use_spatial_hash=False) self.physics_engine = None if", "range(0, round(20 * settings.SCALE), settings.WALL_THICKNESS()): block = arcade.SpriteSolidColor( settings.WALL_THICKNESS(), settings.WALL_THICKNESS(), settings.BASE_COLOR, ) block.center_x", "scale = settings.SCALE * 3 if start_coo: start_x, start_y = start_coo else: start_x", "* 100 self.set_caption( f\"{settings.SCREEN_TITLE} - {self.actual_fps:0.0f} fps, {food_per_100_turns:0.0f} food per 100 turns -", "= 0 # elif key == arcade.key.LEFT or key == arcade.key.RIGHT: # self.player_sprite.change_x", "arcade.key.RIGHT: # self.player_sprite.change_x = MOVEMENT_SPEED # # def on_key_release(self, key, modifiers): # \"\"\"Called", "= random.randint(0, settings.SCREEN_HEIGHT - size * scale) for x in range(start_x, start_x +", "arcade.key.UP: # self.player_sprite.change_y = MOVEMENT_SPEED # elif key == arcade.key.DOWN: # self.player_sprite.change_y =", "use_spatial_hash=True) self.food_list = arcade.SpriteList(is_static=True, use_spatial_hash=True) self.ant_list = arcade.SpriteList(use_spatial_hash=False) self.physics_engine = None if settings.MAX_FPS:", "to something self.generation = 0 self.generation_callback = generation_callback # For testing purposes def", "self.wall_list.append(block) return def create_food_blob(self, size=10, start_coo=None): scale = settings.SCALE * 3 if start_coo:", "arcade.SpriteSolidColor( settings.WALL_THICKNESS(), settings.WALL_THICKNESS(), settings.WALL_COLOR, ) block.center_x = x block.center_y = y wally.append(block) while", "* scale, scale): for y in range(start_y, start_y + size * scale, scale):", "= settings.MAX_FPS # Initializse to something self.generation = 0 self.generation_callback = generation_callback #", "self.colony, scale=settings.SCALE ) self.ant_list.append(ant) arcade.set_background_color(settings.FIELD_COLOR) if self.generation_callback: self.generation_callback(self.generation, self) def create_base(self): x =", "start_y = start_coo else: start_x = random.randint(0, settings.SCREEN_WIDTH - size * scale) start_y", "# elif key == arcade.key.LEFT or key == arcade.key.RIGHT: # self.player_sprite.change_x = 0", "random.randint(0, settings.SCREEN_WIDTH - size * scale) start_y = random.randint(0, settings.SCREEN_HEIGHT - size *", "settings.BASE_COLOR, ) block.center_x = x + 8 * settings.SCALE block.center_y = y self.wall_list.append(block)", "x block.center_y = y if not arcade.check_for_collision_with_list(block, self.wall_list): self.food_list.append(block) def on_draw(self): # This", "+ size * scale, scale): block = arcade.SpriteSolidColor(scale, scale, settings.FOOD_COLOR) block.center_x = x", "from settings import settings class Arena(arcade.Window): def __init__(self, width, height, title, generation_callback=None): super().__init__(width,", "- Food blobs 2x zo groot # - Food blobs droppen met muis", "x = settings.SCREEN_WIDTH / 2 for y in range(0, round(20 * settings.SCALE), settings.WALL_THICKNESS()):", "= -MOVEMENT_SPEED # elif key == arcade.key.LEFT: # self.player_sprite.change_x = -MOVEMENT_SPEED # elif", "\"\"\" # # if key == arcade.key.UP: # self.player_sprite.change_y = MOVEMENT_SPEED # elif", "= x + 8 * settings.SCALE block.center_y = y 
self.wall_list.append(block) def create_wall(self): def", "pressed. \"\"\" # # if key == arcade.key.UP: # self.player_sprite.change_y = MOVEMENT_SPEED #", "_ in range(settings.NUM_ANTS): ant = Ant( settings.SCREEN_WIDTH / 2, 0, self, self.colony, scale=settings.SCALE", "block = arcade.SpriteSolidColor( settings.WALL_THICKNESS(), settings.WALL_THICKNESS(), settings.BASE_COLOR, ) block.center_x = x - 8 *", "key is pressed. \"\"\" # # if key == arcade.key.UP: # self.player_sprite.change_y =", "ant.move() self.generation += 1 #!! Dubbel naast colony.tick() if self.generation_callback: self.generation_callback(self.generation, self) if", "# # if key == arcade.key.UP: # self.player_sprite.change_y = MOVEMENT_SPEED # elif key", "arcade.key.RIGHT: # self.player_sprite.change_x = 0 def on_update(self, delta_time): self.colony.tick() self.actual_fps = (99 *", "def on_draw(self): # This command has to happen before we start drawing arcade.start_render()", "modifiers): # \"\"\"Called when the user releases a key. \"\"\" # # if", "# self.player_sprite.change_x = -MOVEMENT_SPEED # elif key == arcade.key.RIGHT: # self.player_sprite.change_x = MOVEMENT_SPEED", "x = random.randint(0, settings.SCREEN_WIDTH) for y in range(start_y, start_y + length, settings.WALL_THICKNESS()): block_at(x,", "break it off, try a new wall else: for block in wally: self.wall_list.append(block)", "Horizontal start_x = random.randint(0, settings.SCREEN_WIDTH - length) y = random.randint(0, settings.SCREEN_HEIGHT) for x", "self.wall_list.append(block) def create_wall(self): def block_at(x, y): block = arcade.SpriteSolidColor( settings.WALL_THICKNESS(), settings.WALL_THICKNESS(), settings.WALL_COLOR, )", "from ant import Ant from colony import Colony # TODO # - Food", "block.center_y = y if not arcade.check_for_collision_with_list(block, self.wall_list): self.food_list.append(block) def on_draw(self): # This command", "self.generation_callback: self.generation_callback(self.generation, self) if __name__ == \"__main__\": window = Arena(settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT, settings.SCREEN_TITLE) window.setup()", "return def create_food_blob(self, size=10, start_coo=None): scale = settings.SCALE * 3 if start_coo: start_x,", "settings.SCREEN_WIDTH - length) y = random.randint(0, settings.SCREEN_HEIGHT) for x in range(start_x, start_x +", "before we start drawing arcade.start_render() # Draw all the sprites. self.wall_list.draw() self.food_list.draw() for", "= y wally.append(block) while True: wally = [] length = random.randint(settings.WALL_MIN(), settings.WALL_MAX()) if", "start_y + size * scale, scale): block = arcade.SpriteSolidColor(scale, scale, settings.FOOD_COLOR) block.center_x =", "scale): for y in range(start_y, start_y + size * scale, scale): block =", "block = arcade.SpriteSolidColor( settings.WALL_THICKNESS(), settings.WALL_THICKNESS(), settings.WALL_COLOR, ) block.center_x = x block.center_y = y", "_ in range(settings.NUM_FOOD_BLOBS): self.create_food_blob(settings.FOOD_BLOB_SIZE) self.colony = Colony() for _ in range(settings.NUM_ANTS): ant =", "on_key_press(self, key, modifiers): # \"\"\"Called whenever a key is pressed. 
\"\"\" # #", "if arcade.check_for_collision_with_list(block, self.wall_list): break # Oops, break it off, try a new wall", "3 if start_coo: start_x, start_y = start_coo else: start_x = random.randint(0, settings.SCREEN_WIDTH -", "in wally: self.wall_list.append(block) return def create_food_blob(self, size=10, start_coo=None): scale = settings.SCALE * 3", "create_wall(self): def block_at(x, y): block = arcade.SpriteSolidColor( settings.WALL_THICKNESS(), settings.WALL_THICKNESS(), settings.WALL_COLOR, ) block.center_x =", "# Vertical start_y = random.randint(0, settings.SCREEN_HEIGHT - length) x = random.randint(0, settings.SCREEN_WIDTH) for", "ant.draw() # ant.draw_hit_box((255,0,0)) # def on_key_press(self, key, modifiers): # \"\"\"Called whenever a key", "self.generation += 1 #!! Dubbel naast colony.tick() if self.generation_callback: self.generation_callback(self.generation, self) if __name__", "Ant from colony import Colony # TODO # - Food blobs 2x zo", "per 100 turns - {self.generation}\" ) arcade.start_render() for ant in self.ant_list: ant.move() self.generation", "coo # - Lijn tekenen bij backtrack from settings import settings class Arena(arcade.Window):", "#!! Dubbel naast colony.tick() if self.generation_callback: self.generation_callback(self.generation, self) if __name__ == \"__main__\": window", "if self.generation_callback: self.generation_callback(self.generation, self) if __name__ == \"__main__\": window = Arena(settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT, settings.SCREEN_TITLE)", "8 * settings.SCALE block.center_y = y self.wall_list.append(block) def create_wall(self): def block_at(x, y): block", "def __init__(self, width, height, title, generation_callback=None): super().__init__(width, height, title) self.wall_list = arcade.SpriteList(is_static=True, use_spatial_hash=True)", "self.generation_callback(self.generation, self) def create_base(self): x = settings.SCREEN_WIDTH / 2 for y in range(0,", "key == arcade.key.LEFT or key == arcade.key.RIGHT: # self.player_sprite.change_x = 0 def on_update(self,", "+ length, settings.WALL_THICKNESS()): block_at(x, y) for block in wally: if arcade.check_for_collision_with_list(block, self.wall_list): break", "# self.player_sprite.change_y = -MOVEMENT_SPEED # elif key == arcade.key.LEFT: # self.player_sprite.change_x = -MOVEMENT_SPEED", "settings.SCREEN_WIDTH / 2, 0, self, self.colony, scale=settings.SCALE ) self.ant_list.append(ant) arcade.set_background_color(settings.FIELD_COLOR) if self.generation_callback: self.generation_callback(self.generation,", "elif key == arcade.key.LEFT: # self.player_sprite.change_x = -MOVEMENT_SPEED # elif key == arcade.key.RIGHT:", "== arcade.key.DOWN: # self.player_sprite.change_y = 0 # elif key == arcade.key.LEFT or key", "def on_update(self, delta_time): self.colony.tick() self.actual_fps = (99 * self.actual_fps + 1 / delta_time)", "happen before we start drawing arcade.start_render() # Draw all the sprites. 
self.wall_list.draw() self.food_list.draw()", "generation_callback # For testing purposes def setup(self): if settings.DRAW_BASE: self.create_base() for _ in", "x in range(start_x, start_x + size * scale, scale): for y in range(start_y,", "# # def on_key_release(self, key, modifiers): # \"\"\"Called when the user releases a", "length) y = random.randint(0, settings.SCREEN_HEIGHT) for x in range(start_x, start_x + length, settings.WALL_THICKNESS()):", "arcade.SpriteList(is_static=True, use_spatial_hash=True) self.ant_list = arcade.SpriteList(use_spatial_hash=False) self.physics_engine = None if settings.MAX_FPS: self.set_update_rate(1 / settings.MAX_FPS)", "settings.MAX_FPS # Initializse to something self.generation = 0 self.generation_callback = generation_callback # For", "if start_coo: start_x, start_y = start_coo else: start_x = random.randint(0, settings.SCREEN_WIDTH - size", "try a new wall else: for block in wally: self.wall_list.append(block) return def create_food_blob(self,", "to happen before we start drawing arcade.start_render() # Draw all the sprites. self.wall_list.draw()", "# def on_key_release(self, key, modifiers): # \"\"\"Called when the user releases a key.", "scale): block = arcade.SpriteSolidColor(scale, scale, settings.FOOD_COLOR) block.center_x = x block.center_y = y if", "2x zo groot # - Food blobs droppen met muis # - Food", "elif key == arcade.key.LEFT or key == arcade.key.RIGHT: # self.player_sprite.change_x = 0 def", "= random.randint(0, settings.SCREEN_HEIGHT - length) x = random.randint(0, settings.SCREEN_WIDTH) for y in range(start_y,", "we start drawing arcade.start_render() # Draw all the sprites. self.wall_list.draw() self.food_list.draw() for ant", "# \"\"\"Called whenever a key is pressed. \"\"\" # # if key ==", "+ size * scale, scale): for y in range(start_y, start_y + size *", "self.generation = 0 self.generation_callback = generation_callback # For testing purposes def setup(self): if", "2, 0, self, self.colony, scale=settings.SCALE ) self.ant_list.append(ant) arcade.set_background_color(settings.FIELD_COLOR) if self.generation_callback: self.generation_callback(self.generation, self) def", "- size * scale) for x in range(start_x, start_x + size * scale,", "For testing purposes def setup(self): if settings.DRAW_BASE: self.create_base() for _ in range(settings.NUM_WALLS): self.create_wall()", "a key is pressed. 
\"\"\" # # if key == arcade.key.UP: # self.player_sprite.change_y", "colony.tick() if self.generation_callback: self.generation_callback(self.generation, self) if __name__ == \"__main__\": window = Arena(settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT,", "x + 8 * settings.SCALE block.center_y = y self.wall_list.append(block) def create_wall(self): def block_at(x,", "use_spatial_hash=True) self.ant_list = arcade.SpriteList(use_spatial_hash=False) self.physics_engine = None if settings.MAX_FPS: self.set_update_rate(1 / settings.MAX_FPS) self.actual_fps", "start_x = random.randint(0, settings.SCREEN_WIDTH - size * scale) start_y = random.randint(0, settings.SCREEN_HEIGHT -", "setup(self): if settings.DRAW_BASE: self.create_base() for _ in range(settings.NUM_WALLS): self.create_wall() for _ in range(settings.NUM_FOOD_BLOBS):", "if settings.MAX_FPS: self.set_update_rate(1 / settings.MAX_FPS) self.actual_fps = settings.MAX_FPS # Initializse to something self.generation", "in range(settings.NUM_ANTS): ant = Ant( settings.SCREEN_WIDTH / 2, 0, self, self.colony, scale=settings.SCALE )", "import settings class Arena(arcade.Window): def __init__(self, width, height, title, generation_callback=None): super().__init__(width, height, title)", "self.ant_list: ant.move() self.generation += 1 #!! Dubbel naast colony.tick() if self.generation_callback: self.generation_callback(self.generation, self)", "8 * settings.SCALE block.center_y = y self.wall_list.append(block) block = arcade.SpriteSolidColor( settings.WALL_THICKNESS(), settings.WALL_THICKNESS(), settings.BASE_COLOR,", "from colony import Colony # TODO # - Food blobs 2x zo groot", "# self.player_sprite.change_x = 0 def on_update(self, delta_time): self.colony.tick() self.actual_fps = (99 * self.actual_fps", "in range(start_y, start_y + size * scale, scale): block = arcade.SpriteSolidColor(scale, scale, settings.FOOD_COLOR)", "if key == arcade.key.UP: # self.player_sprite.change_y = MOVEMENT_SPEED # elif key == arcade.key.DOWN:", "arcade.key.DOWN: # self.player_sprite.change_y = -MOVEMENT_SPEED # elif key == arcade.key.LEFT: # self.player_sprite.change_x =", "* 3 if start_coo: start_x, start_y = start_coo else: start_x = random.randint(0, settings.SCREEN_WIDTH", "settings.DRAW_BASE: self.create_base() for _ in range(settings.NUM_WALLS): self.create_wall() for _ in range(settings.NUM_FOOD_BLOBS): self.create_food_blob(settings.FOOD_BLOB_SIZE) self.colony", "class Arena(arcade.Window): def __init__(self, width, height, title, generation_callback=None): super().__init__(width, height, title) self.wall_list =", "Vertical start_y = random.randint(0, settings.SCREEN_HEIGHT - length) x = random.randint(0, settings.SCREEN_WIDTH) for y", "on_key_release(self, key, modifiers): # \"\"\"Called when the user releases a key. \"\"\" #", "for y in range(start_y, start_y + size * scale, scale): block = arcade.SpriteSolidColor(scale,", "x in range(start_x, start_x + length, settings.WALL_THICKNESS()): block_at(x, y) else: # Vertical start_y", "- {self.generation}\" ) arcade.start_render() for ant in self.ant_list: ant.move() self.generation += 1 #!!", "self.generation_callback = generation_callback # For testing purposes def setup(self): if settings.DRAW_BASE: self.create_base() for", "* scale) for x in range(start_x, start_x + size * scale, scale): for", "y): block = arcade.SpriteSolidColor( settings.WALL_THICKNESS(), settings.WALL_THICKNESS(), settings.WALL_COLOR, ) block.center_x = x block.center_y =" ]
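The Arena above imports a settings object whose definition is not part of this listing; note that WALL_THICKNESS, WALL_MIN and WALL_MAX are invoked with parentheses, so the real module must expose them as callables rather than plain constants. A minimal stand-in consistent with that usage, with every value a made-up guess purely so the file can be exercised:

# Hypothetical stand-in for the project's settings module; every value
# below is an illustrative guess, not taken from the source.
class _Settings:
    SCREEN_WIDTH = 1280
    SCREEN_HEIGHT = 720
    SCREEN_TITLE = "Ants"
    SCALE = 2
    MAX_FPS = 60
    DRAW_BASE = True
    NUM_WALLS = 10
    NUM_FOOD_BLOBS = 5
    FOOD_BLOB_SIZE = 10
    NUM_ANTS = 100
    WALL_COLOR = (128, 128, 128)
    BASE_COLOR = (200, 150, 50)
    FOOD_COLOR = (0, 200, 0)
    FIELD_COLOR = (20, 80, 20)

    # The Arena calls these, so they are methods here, not constants.
    def WALL_THICKNESS(self):
        return 4 * self.SCALE

    def WALL_MIN(self):
        return 20 * self.SCALE

    def WALL_MAX(self):
        return 200 * self.SCALE


settings = _Settings()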
# src/data/MakeDataset.py
import gdown
import zipfile
from pathlib import Path


class MakeDataset():
    """
    A class that handles everything related to getting and setting up
    the training and test datasets

    ...

    Methods
    -------
    download_unzip_data()
        Downloads the data from Google Drive and unzips it
    """

    def __init__(self, file_url):
        super().__init__()
        self.file_url = file_url

    def download_unzip_data(self):
        """ Downloads the data from Google Drive and unzips it """
        project_dir = Path(__file__).resolve().parents[2]
        print(project_dir)
        gdown.download(self.file_url, './data/raw/raw.zip', quiet=False)
        # with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
        #     zip_ref.extractall(directory_to_extract_to)
        # !unzip ./horse2zebra.zip > /dev/null


# TEST
# Test 2
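A sketch of how the class might be driven, and of the extraction step that is left commented out above. The Drive URL is a placeholder and './data/raw' as the extraction target is an assumption; only the download path './data/raw/raw.zip' comes from the source:

# Hypothetical usage; the file id in the URL is a placeholder.
dataset = MakeDataset('https://drive.google.com/uc?id=<FILE_ID>')
dataset.download_unzip_data()

# One way to finish the commented-out extraction step, assuming the
# archive lands at ./data/raw/raw.zip as in the gdown call above.
with zipfile.ZipFile('./data/raw/raw.zip', 'r') as zip_ref:
    zip_ref.extractall('./data/raw')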
import tkinter
from tkinter import messagebox
from tkinter import Button

window = tkinter.Tk()

HEIGHT = window.winfo_height()
WIDTH = window.winfo_width()
print(f'Height: {HEIGHT}, Width: {WIDTH}')


def click_button():
    msg = messagebox.showinfo("Hello!", "You clicked a button!")


# initializing button
button_widget = Button(
    window,
    text='Click me!',
    command=click_button
)

# placing a button to center of the window
button_widget.place(
    relx=0.5,
    rely=0.5,
    anchor=tkinter.CENTER
)

window.mainloop()
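One caveat with the geometry query above: Tk reports a widget's size only after the window has been mapped, so winfo_height() and winfo_width() typically both return 1 at that point in the script. Re-querying after forcing the event loop to process pending work (i.e. before mainloop()) yields real values; a small fragment using only standard tkinter calls:

# Querying geometry before the window is drawn usually prints 1x1.
# Forcing an update first makes Tk compute the actual size.
window.update()
print(f'Height: {window.winfo_height()}, Width: {window.winfo_width()}')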
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.debugger import set_trace
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import glob
import os
from skimage.io import imread
from skimage.transform import resize
from torch.utils import data
from config import Config
import pandas as pd
from utils.rotate_fcns import rotate_2d, rotate_3d, flip_2d


class DataLoader2D(data.Dataset):
    def __init__(self, split, path_to_data):
        self.split = split
        self.path = path_to_data
        data = pd.read_csv("utils/rot_dict_unique.csv")
        self.rots_table = data.loc[:, :].to_numpy()
        xl_file = pd.ExcelFile(self.path + os.sep + 'ListOfData.xlsx')
        data = pd.read_excel(xl_file, header=None)
        folders = data.loc[:, 0].tolist()
        names = data.loc[:, 1].tolist()
        file_names = []
        for folder, name in zip(folders, names):
            file_names.append((self.path + os.sep + folder.split('\\')[-1]
                               + os.sep + name).replace('.mhd', ''))
        if self.split == 'training':
            file_names = file_names[:int(len(file_names) * 0.8)]
        elif self.split == 'testing':
            file_names = file_names[int(len(file_names) * 0.8):-20]
        self.file_names = []
        self.vec = []
        self.flip = []
        self.lbls = []
        for file in file_names:
            for flip in [0, 1]:
                for unique_rot_num in range(self.rots_table.shape[0]):
                    self.file_names.append(file)
                    self.vec.append(self.rots_table[unique_rot_num, :])
                    self.flip.append(flip)
                    self.lbls.append(unique_rot_num)

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, index):
        file_name = self.file_names[index]
        r = self.vec[index][0:3]
        flip = self.flip[index]
        flip = np.array([flip])
        img_list = []
        folders = ['mean', 'max', 'std']
        for folder in folders:
            for k in range(3):
                tmp = imread(file_name + '_' + folder + '_' + str(k + 1) + '.png')
                tmp = tmp.astype(np.float32) / 255 - 0.5
                img_list.append(tmp)
        # if self.split == 'training':
        #     max_mult_change = 0.3
        #     for k in range(len(img_list)):
        #         mult_change = 1 + torch.rand(1).numpy()[0] * 2 * max_mult_change - max_mult_change
        #         img_list[k] = img_list[k] * mult_change
        #     max_add_change = 0.3
        #     for k in range(len(img_list)):
        #         add_change = torch.rand(1).numpy()[0] * 2 * max_add_change - max_add_change
        #         img_list[k] = img_list[k] + add_change
        imgs = np.stack(img_list, axis=2)
        for k in range(0, 9, 3):
            if flip == 1:
                imgs[:, :, k:k + 3] = flip_2d(imgs[:, :, k:k + 3])
            imgs[:, :, k:k + 3] = rotate_2d(imgs[:, :, k:k + 3], r)
        imgs = torch.from_numpy(imgs.copy())
        imgs = imgs.permute(2, 0, 1)
        lbl = self.lbls[index]
        lbl2 = np.zeros(self.rots_table.shape[0]).astype(np.float32)
        lbl2[lbl] = 1
        lbl = torch.from_numpy(lbl2)
lbl2=np.zeros(self.rots_table.shape[0]).astype(np.float32) lbl2[lbl]=1 lbl=torch.from_numpy(lbl2) return", "folders=data.loc[:,0].tolist() names=data.loc[:,1].tolist() file_names=[] for folder,name in zip(folders,names): file_names.append((self.path + os.sep + folder.split('\\\\')[-1] +", "imread from skimage.transform import resize from torch.utils import data import os from config", "os from config import Config import pandas as pd from utils.rotate_fcns import rotate_2d,rotate_3d,flip_2d", "range(0,9,3): if flip==1: imgs[:,:,k:k+3]=flip_2d(imgs[:,:,k:k+3]) imgs[:,:,k:k+3]=rotate_2d(imgs[:,:,k:k+3],r) imgs=torch.from_numpy(imgs.copy()) imgs=imgs.permute(2,0,1) lbl=self.lbls[index] lbl2=np.zeros(self.rots_table.shape[0]).astype(np.float32) lbl2[lbl]=1 lbl=torch.from_numpy(lbl2) return imgs,lbl", "from skimage.transform import resize from torch.utils import data import os from config import", "tmp=imread(file_name + '_' + folder + '_'+ str(k+1) +'.png' ) tmp=tmp.astype(np.float32)/255-0.5 img_list.append(tmp) #", "+'.png' ) tmp=tmp.astype(np.float32)/255-0.5 img_list.append(tmp) # if self.split=='training': # max_mult_change=0.3 # for k in", "folder.split('\\\\')[-1] + os.sep + name).replace('.mhd','')) if self.split=='training': file_names=file_names[:int(len(file_names)*0.8)] elif self.split=='testing': file_names=file_names[int(len(file_names)*0.8):-20] self.file_names=[] self.vec=[]", "in range(3): tmp=imread(file_name + '_' + folder + '_'+ str(k+1) +'.png' ) tmp=tmp.astype(np.float32)/255-0.5", "torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim", "range(len(img_list)): # add_change=torch.rand(1).numpy()[0]*2*max_add_change-max_add_change # img_list[k]=img_list[k]+add_change imgs=np.stack(img_list,axis=2) for k in range(0,9,3): if flip==1: imgs[:,:,k:k+3]=flip_2d(imgs[:,:,k:k+3])", "file in file_names: for flip in [0,1]: for unique_rot_num in range(self.rots_table.shape[0]): self.file_names.append(file) self.vec.append(self.rots_table[unique_rot_num,:])" ]
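A minimal usage sketch for the loader above, assuming the utils/ helpers and the image directory layout it expects are in place (the dataset root path, batch size, and worker count here are illustrative):

from torch.utils.data import DataLoader

# Hypothetical dataset root; replace with the actual data directory.
train_set = DataLoader2D(split='training', path_to_data='/data/scans')
train_loader = DataLoader(train_set, batch_size=16, shuffle=True, num_workers=4)

for imgs, lbl in train_loader:
    # imgs: (B, 9, H, W) float tensor; lbl: (B, n_rotations) one-hot target
    pass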
[ "core.monorepo_stack import MonorepoStack from core.pipelines_stack import PipelineStack app = cdk.App() core = MonorepoStack(app,", "import (core as cdk) from core.monorepo_stack import MonorepoStack from core.pipelines_stack import PipelineStack app", "(core as cdk) from core.monorepo_stack import MonorepoStack from core.pipelines_stack import PipelineStack app =", "from core.pipelines_stack import PipelineStack app = cdk.App() core = MonorepoStack(app, \"MonoRepoStack\") PipelineStack(app, \"PipelinesStack\",", "as cdk) from core.monorepo_stack import MonorepoStack from core.pipelines_stack import PipelineStack app = cdk.App()", "cdk) from core.monorepo_stack import MonorepoStack from core.pipelines_stack import PipelineStack app = cdk.App() core", "aws_cdk import (core as cdk) from core.monorepo_stack import MonorepoStack from core.pipelines_stack import PipelineStack", "import PipelineStack app = cdk.App() core = MonorepoStack(app, \"MonoRepoStack\") PipelineStack(app, \"PipelinesStack\", core.exported_monorepo) app.synth()", "#!/usr/bin/env python3 from aws_cdk import (core as cdk) from core.monorepo_stack import MonorepoStack from", "from aws_cdk import (core as cdk) from core.monorepo_stack import MonorepoStack from core.pipelines_stack import", "core.pipelines_stack import PipelineStack app = cdk.App() core = MonorepoStack(app, \"MonoRepoStack\") PipelineStack(app, \"PipelinesStack\", core.exported_monorepo)", "import MonorepoStack from core.pipelines_stack import PipelineStack app = cdk.App() core = MonorepoStack(app, \"MonoRepoStack\")", "MonorepoStack from core.pipelines_stack import PipelineStack app = cdk.App() core = MonorepoStack(app, \"MonoRepoStack\") PipelineStack(app,", "from core.monorepo_stack import MonorepoStack from core.pipelines_stack import PipelineStack app = cdk.App() core =", "python3 from aws_cdk import (core as cdk) from core.monorepo_stack import MonorepoStack from core.pipelines_stack" ]
[ "Pesos / v_Dolar Dolares = str(Dolares) print(\"Tienes $\" + Dolares + \" Dolares\")", "Pesos = float(Pesos) v_Dolar = 4033 Dolares = Pesos / v_Dolar Dolares =", "\") Pesos = float(Pesos) v_Dolar = 4033 Dolares = Pesos / v_Dolar Dolares", "Colombianos tiene?: \") Pesos = float(Pesos) v_Dolar = 4033 Dolares = Pesos /", "v_Dolar = 4033 Dolares = Pesos / v_Dolar Dolares = str(Dolares) print(\"Tienes $\"", "4033 Dolares = Pesos / v_Dolar Dolares = str(Dolares) print(\"Tienes $\" + Dolares", "Pesos = input(\"¿Cuántos Pesos Colombianos tiene?: \") Pesos = float(Pesos) v_Dolar = 4033", "= float(Pesos) v_Dolar = 4033 Dolares = Pesos / v_Dolar Dolares = str(Dolares)", "input(\"¿Cuántos Pesos Colombianos tiene?: \") Pesos = float(Pesos) v_Dolar = 4033 Dolares =", "= 4033 Dolares = Pesos / v_Dolar Dolares = str(Dolares) print(\"Tienes $\" +", "Dolares = Pesos / v_Dolar Dolares = str(Dolares) print(\"Tienes $\" + Dolares +", "float(Pesos) v_Dolar = 4033 Dolares = Pesos / v_Dolar Dolares = str(Dolares) print(\"Tienes", "= Pesos / v_Dolar Dolares = str(Dolares) print(\"Tienes $\" + Dolares + \"", "= input(\"¿Cuántos Pesos Colombianos tiene?: \") Pesos = float(Pesos) v_Dolar = 4033 Dolares", "tiene?: \") Pesos = float(Pesos) v_Dolar = 4033 Dolares = Pesos / v_Dolar", "Pesos Colombianos tiene?: \") Pesos = float(Pesos) v_Dolar = 4033 Dolares = Pesos" ]
[ "nxt = now * k if nxt not in s: heapq.heappush(h, nxt) s.add(nxt)", "= [1] s = set([1]) while n > 1: now = heapq.heappop(h) for", "now = heapq.heappop(h) for k in (2, 3, 5): nxt = now *", "int :rtype: int \"\"\" h = [1] s = set([1]) while n >", "(2, 3, 5): nxt = now * k if nxt not in s:", "* k if nxt not in s: heapq.heappush(h, nxt) s.add(nxt) n -= 1", "for k in (2, 3, 5): nxt = now * k if nxt", "k in (2, 3, 5): nxt = now * k if nxt not", "if nxt not in s: heapq.heappush(h, nxt) s.add(nxt) n -= 1 return h[0]", "\"\"\" :type n: int :rtype: int \"\"\" h = [1] s = set([1])", "Solution(object): def nthUglyNumber(self, n): \"\"\" :type n: int :rtype: int \"\"\" h =", "<gh_stars>1-10 class Solution(object): def nthUglyNumber(self, n): \"\"\" :type n: int :rtype: int \"\"\"", "n: int :rtype: int \"\"\" h = [1] s = set([1]) while n", "int \"\"\" h = [1] s = set([1]) while n > 1: now", "n > 1: now = heapq.heappop(h) for k in (2, 3, 5): nxt", ":type n: int :rtype: int \"\"\" h = [1] s = set([1]) while", "\"\"\" h = [1] s = set([1]) while n > 1: now =", "in (2, 3, 5): nxt = now * k if nxt not in", "[1] s = set([1]) while n > 1: now = heapq.heappop(h) for k", "nthUglyNumber(self, n): \"\"\" :type n: int :rtype: int \"\"\" h = [1] s", ":rtype: int \"\"\" h = [1] s = set([1]) while n > 1:", "def nthUglyNumber(self, n): \"\"\" :type n: int :rtype: int \"\"\" h = [1]", "1: now = heapq.heappop(h) for k in (2, 3, 5): nxt = now", "class Solution(object): def nthUglyNumber(self, n): \"\"\" :type n: int :rtype: int \"\"\" h", "= now * k if nxt not in s: heapq.heappush(h, nxt) s.add(nxt) n", "k if nxt not in s: heapq.heappush(h, nxt) s.add(nxt) n -= 1 return", "5): nxt = now * k if nxt not in s: heapq.heappush(h, nxt)", "h = [1] s = set([1]) while n > 1: now = heapq.heappop(h)", "> 1: now = heapq.heappop(h) for k in (2, 3, 5): nxt =", "n): \"\"\" :type n: int :rtype: int \"\"\" h = [1] s =", "now * k if nxt not in s: heapq.heappush(h, nxt) s.add(nxt) n -=", "heapq.heappop(h) for k in (2, 3, 5): nxt = now * k if", "while n > 1: now = heapq.heappop(h) for k in (2, 3, 5):", "3, 5): nxt = now * k if nxt not in s: heapq.heappush(h,", "set([1]) while n > 1: now = heapq.heappop(h) for k in (2, 3,", "= heapq.heappop(h) for k in (2, 3, 5): nxt = now * k", "= set([1]) while n > 1: now = heapq.heappop(h) for k in (2,", "s = set([1]) while n > 1: now = heapq.heappop(h) for k in" ]
[ "'#fff') draw = ImageDraw.Draw(img) for _ in range(config['count']): # Select colour if config['color']['random']", "or config['shape'] == 'rectangle': rgb = random.randint(128,255), random.randint(0,255), random.randint(80,255) else: rgb = config['color']['fixed']", "img.size[0]), random.randint(0, img.size[1]) # Draw shape shape = config['shape'] if shape == 'line':", "from PIL import Image, ImageDraw config = { 'count' : 10, 'color' :", ": (180, 10, 240) }, 'shape' : 'rectangle' } img = Image.new(\"RGB\", (400,", "= random.randint(0, img.size[0]), random.randint(0, img.size[1]) # Draw shape shape = config['shape'] if shape", "*end), 0, random.randint(-180, 180), fill=rgb) elif shape == 'pieslice': draw.pieslice((*start, *end), 0, random.randint(0,", "random.randint(0, img.size[1]) end = random.randint(0, img.size[0]), random.randint(0, img.size[1]) # Draw shape shape =", ": 10, 'color' : { 'random' : True, 'fixed' : (180, 10, 240)", "draw.line((*start, *end), fill=rgb) elif shape == 'rectangle': draw.rectangle((*start, *end), fill=rgb) elif shape ==", "# Draw shape shape = config['shape'] if shape == 'line': draw.line((*start, *end), fill=rgb)", ": { 'random' : True, 'fixed' : (180, 10, 240) }, 'shape' :", "== 'arc': draw.arc((*start, *end), 0, random.randint(-180, 180), fill=rgb) elif shape == 'pieslice': draw.pieslice((*start,", "= config['color']['fixed'] # Get random endpoints for shape start = random.randint(0, img.size[0]), random.randint(0,", ": 'rectangle' } img = Image.new(\"RGB\", (400, 400), '#fff') draw = ImageDraw.Draw(img) for", "# Select colour if config['color']['random'] or config['shape'] == 'rectangle': rgb = random.randint(128,255), random.randint(0,255),", "endpoints for shape start = random.randint(0, img.size[0]), random.randint(0, img.size[1]) end = random.randint(0, img.size[0]),", "random.randint(0, img.size[0]), random.randint(0, img.size[1]) end = random.randint(0, img.size[0]), random.randint(0, img.size[1]) # Draw shape", "= config['shape'] if shape == 'line': draw.line((*start, *end), fill=rgb) elif shape == 'rectangle':", "400), '#fff') draw = ImageDraw.Draw(img) for _ in range(config['count']): # Select colour if", "fill=rgb) elif shape == 'rectangle': draw.rectangle((*start, *end), fill=rgb) elif shape == 'arc': draw.arc((*start,", "colour if config['color']['random'] or config['shape'] == 'rectangle': rgb = random.randint(128,255), random.randint(0,255), random.randint(80,255) else:", "shape == 'arc': draw.arc((*start, *end), 0, random.randint(-180, 180), fill=rgb) elif shape == 'pieslice':", "180), fill=rgb) elif shape == 'pieslice': draw.pieslice((*start, *end), 0, random.randint(0, 180), fill=rgb) #", "== 'pieslice': draw.pieslice((*start, *end), 0, random.randint(0, 180), fill=rgb) # Store in file img.save(\"art.{}.{}.jpg\".format(shape,", "shape start = random.randint(0, img.size[0]), random.randint(0, img.size[1]) end = random.randint(0, img.size[0]), random.randint(0, img.size[1])", "# Get random endpoints for shape start = random.randint(0, img.size[0]), random.randint(0, img.size[1]) end", "240) }, 'shape' : 'rectangle' } img = Image.new(\"RGB\", (400, 400), '#fff') draw", "random.randint(80,255) else: rgb = config['color']['fixed'] # Get random endpoints for shape start =", "shape == 'line': draw.line((*start, *end), fill=rgb) elif shape == 'rectangle': draw.rectangle((*start, *end), fill=rgb)", "= random.randint(128,255), random.randint(0,255), random.randint(80,255) else: rgb = config['color']['fixed'] # Get random endpoints for", "Get 
random endpoints for shape start = random.randint(0, img.size[0]), random.randint(0, img.size[1]) end =", "img.size[0]), random.randint(0, img.size[1]) end = random.randint(0, img.size[0]), random.randint(0, img.size[1]) # Draw shape shape", "elif shape == 'pieslice': draw.pieslice((*start, *end), 0, random.randint(0, 180), fill=rgb) # Store in", "'rectangle': rgb = random.randint(128,255), random.randint(0,255), random.randint(80,255) else: rgb = config['color']['fixed'] # Get random", "'pieslice': draw.pieslice((*start, *end), 0, random.randint(0, 180), fill=rgb) # Store in file img.save(\"art.{}.{}.jpg\".format(shape, random.randint(1000,9999)))", "draw = ImageDraw.Draw(img) for _ in range(config['count']): # Select colour if config['color']['random'] or", "rgb = config['color']['fixed'] # Get random endpoints for shape start = random.randint(0, img.size[0]),", "Select colour if config['color']['random'] or config['shape'] == 'rectangle': rgb = random.randint(128,255), random.randint(0,255), random.randint(80,255)", "in range(config['count']): # Select colour if config['color']['random'] or config['shape'] == 'rectangle': rgb =", "for shape start = random.randint(0, img.size[0]), random.randint(0, img.size[1]) end = random.randint(0, img.size[0]), random.randint(0,", "*end), fill=rgb) elif shape == 'arc': draw.arc((*start, *end), 0, random.randint(-180, 180), fill=rgb) elif", "shape shape = config['shape'] if shape == 'line': draw.line((*start, *end), fill=rgb) elif shape", "random.randint(0,255), random.randint(80,255) else: rgb = config['color']['fixed'] # Get random endpoints for shape start", "}, 'shape' : 'rectangle' } img = Image.new(\"RGB\", (400, 400), '#fff') draw =", "0, random.randint(-180, 180), fill=rgb) elif shape == 'pieslice': draw.pieslice((*start, *end), 0, random.randint(0, 180),", "config = { 'count' : 10, 'color' : { 'random' : True, 'fixed'", "(180, 10, 240) }, 'shape' : 'rectangle' } img = Image.new(\"RGB\", (400, 400),", "'rectangle' } img = Image.new(\"RGB\", (400, 400), '#fff') draw = ImageDraw.Draw(img) for _", "= Image.new(\"RGB\", (400, 400), '#fff') draw = ImageDraw.Draw(img) for _ in range(config['count']): #", "ImageDraw.Draw(img) for _ in range(config['count']): # Select colour if config['color']['random'] or config['shape'] ==", "'shape' : 'rectangle' } img = Image.new(\"RGB\", (400, 400), '#fff') draw = ImageDraw.Draw(img)", "img.size[1]) end = random.randint(0, img.size[0]), random.randint(0, img.size[1]) # Draw shape shape = config['shape']", "10, 'color' : { 'random' : True, 'fixed' : (180, 10, 240) },", "random.randint(0, img.size[1]) # Draw shape shape = config['shape'] if shape == 'line': draw.line((*start,", "if shape == 'line': draw.line((*start, *end), fill=rgb) elif shape == 'rectangle': draw.rectangle((*start, *end),", "random.randint(0, img.size[0]), random.randint(0, img.size[1]) # Draw shape shape = config['shape'] if shape ==", "Draw shape shape = config['shape'] if shape == 'line': draw.line((*start, *end), fill=rgb) elif", "Image, ImageDraw config = { 'count' : 10, 'color' : { 'random' :", "start = random.randint(0, img.size[0]), random.randint(0, img.size[1]) end = random.randint(0, img.size[0]), random.randint(0, img.size[1]) #", "== 'rectangle': draw.rectangle((*start, *end), fill=rgb) elif shape == 'arc': draw.arc((*start, *end), 0, random.randint(-180,", "elif shape == 'rectangle': draw.rectangle((*start, *end), fill=rgb) elif shape == 'arc': draw.arc((*start, *end),", "config['color']['random'] or config['shape'] == 'rectangle': 
rgb = random.randint(128,255), random.randint(0,255), random.randint(80,255) else: rgb =", "Image.new(\"RGB\", (400, 400), '#fff') draw = ImageDraw.Draw(img) for _ in range(config['count']): # Select", "import Image, ImageDraw config = { 'count' : 10, 'color' : { 'random'", "config['shape'] if shape == 'line': draw.line((*start, *end), fill=rgb) elif shape == 'rectangle': draw.rectangle((*start,", "for _ in range(config['count']): # Select colour if config['color']['random'] or config['shape'] == 'rectangle':", "fill=rgb) elif shape == 'pieslice': draw.pieslice((*start, *end), 0, random.randint(0, 180), fill=rgb) # Store", "= ImageDraw.Draw(img) for _ in range(config['count']): # Select colour if config['color']['random'] or config['shape']", "'rectangle': draw.rectangle((*start, *end), fill=rgb) elif shape == 'arc': draw.arc((*start, *end), 0, random.randint(-180, 180),", "== 'rectangle': rgb = random.randint(128,255), random.randint(0,255), random.randint(80,255) else: rgb = config['color']['fixed'] # Get", "= { 'count' : 10, 'color' : { 'random' : True, 'fixed' :", "import sys import random from PIL import Image, ImageDraw config = { 'count'", "'count' : 10, 'color' : { 'random' : True, 'fixed' : (180, 10,", "10, 240) }, 'shape' : 'rectangle' } img = Image.new(\"RGB\", (400, 400), '#fff')", "'line': draw.line((*start, *end), fill=rgb) elif shape == 'rectangle': draw.rectangle((*start, *end), fill=rgb) elif shape", "config['shape'] == 'rectangle': rgb = random.randint(128,255), random.randint(0,255), random.randint(80,255) else: rgb = config['color']['fixed'] #", "draw.rectangle((*start, *end), fill=rgb) elif shape == 'arc': draw.arc((*start, *end), 0, random.randint(-180, 180), fill=rgb)", "_ in range(config['count']): # Select colour if config['color']['random'] or config['shape'] == 'rectangle': rgb", "range(config['count']): # Select colour if config['color']['random'] or config['shape'] == 'rectangle': rgb = random.randint(128,255),", "fill=rgb) elif shape == 'arc': draw.arc((*start, *end), 0, random.randint(-180, 180), fill=rgb) elif shape", "ImageDraw config = { 'count' : 10, 'color' : { 'random' : True,", "img.size[1]) # Draw shape shape = config['shape'] if shape == 'line': draw.line((*start, *end),", "random.randint(-180, 180), fill=rgb) elif shape == 'pieslice': draw.pieslice((*start, *end), 0, random.randint(0, 180), fill=rgb)", "= random.randint(0, img.size[0]), random.randint(0, img.size[1]) end = random.randint(0, img.size[0]), random.randint(0, img.size[1]) # Draw", "shape == 'rectangle': draw.rectangle((*start, *end), fill=rgb) elif shape == 'arc': draw.arc((*start, *end), 0,", "{ 'random' : True, 'fixed' : (180, 10, 240) }, 'shape' : 'rectangle'", "config['color']['fixed'] # Get random endpoints for shape start = random.randint(0, img.size[0]), random.randint(0, img.size[1])", "<reponame>DevopediaOrg/python-for-kids import sys import random from PIL import Image, ImageDraw config = {", "random from PIL import Image, ImageDraw config = { 'count' : 10, 'color'", "PIL import Image, ImageDraw config = { 'count' : 10, 'color' : {", "random endpoints for shape start = random.randint(0, img.size[0]), random.randint(0, img.size[1]) end = random.randint(0,", "shape == 'pieslice': draw.pieslice((*start, *end), 0, random.randint(0, 180), fill=rgb) # Store in file", "(400, 400), '#fff') draw = ImageDraw.Draw(img) for _ in range(config['count']): # Select colour", "} img = Image.new(\"RGB\", (400, 400), '#fff') draw = ImageDraw.Draw(img) for _ in", "'random' : True, 'fixed' : (180, 
10, 240) }, 'shape' : 'rectangle' }", ": True, 'fixed' : (180, 10, 240) }, 'shape' : 'rectangle' } img", "else: rgb = config['color']['fixed'] # Get random endpoints for shape start = random.randint(0,", "random.randint(128,255), random.randint(0,255), random.randint(80,255) else: rgb = config['color']['fixed'] # Get random endpoints for shape", "== 'line': draw.line((*start, *end), fill=rgb) elif shape == 'rectangle': draw.rectangle((*start, *end), fill=rgb) elif", "*end), fill=rgb) elif shape == 'rectangle': draw.rectangle((*start, *end), fill=rgb) elif shape == 'arc':", "shape = config['shape'] if shape == 'line': draw.line((*start, *end), fill=rgb) elif shape ==", "'arc': draw.arc((*start, *end), 0, random.randint(-180, 180), fill=rgb) elif shape == 'pieslice': draw.pieslice((*start, *end),", "import random from PIL import Image, ImageDraw config = { 'count' : 10,", "'fixed' : (180, 10, 240) }, 'shape' : 'rectangle' } img = Image.new(\"RGB\",", "elif shape == 'arc': draw.arc((*start, *end), 0, random.randint(-180, 180), fill=rgb) elif shape ==", "True, 'fixed' : (180, 10, 240) }, 'shape' : 'rectangle' } img =", "img = Image.new(\"RGB\", (400, 400), '#fff') draw = ImageDraw.Draw(img) for _ in range(config['count']):", "sys import random from PIL import Image, ImageDraw config = { 'count' :", "'color' : { 'random' : True, 'fixed' : (180, 10, 240) }, 'shape'", "if config['color']['random'] or config['shape'] == 'rectangle': rgb = random.randint(128,255), random.randint(0,255), random.randint(80,255) else: rgb", "rgb = random.randint(128,255), random.randint(0,255), random.randint(80,255) else: rgb = config['color']['fixed'] # Get random endpoints", "end = random.randint(0, img.size[0]), random.randint(0, img.size[1]) # Draw shape shape = config['shape'] if", "{ 'count' : 10, 'color' : { 'random' : True, 'fixed' : (180,", "draw.arc((*start, *end), 0, random.randint(-180, 180), fill=rgb) elif shape == 'pieslice': draw.pieslice((*start, *end), 0," ]
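Depending on the Pillow version, rectangle, arc, and pieslice expect the bounding box to satisfy x0 <= x1 and y0 <= y1, so two unordered random endpoints can be rejected or silently skipped; a small helper sketch that normalizes the corners first:

def ordered_box(p, q):
    # Sort the two corner points so that x0 <= x1 and y0 <= y1.
    (x0, x1) = sorted((p[0], q[0]))
    (y0, y1) = sorted((p[1], q[1]))
    return (x0, y0, x1, y1)

# e.g. draw.rectangle(ordered_box(start, end), fill=rgb)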
[ "QtGui, QtCore # (the example applies equally well to PySide) import sys import", "widget to hold everything w = QtGui.QWidget() w.move(400,100) #w.showFullScreen() # change background color", "import pyqtgraph as pg import numpy as np ## Always start by initializing", "grid layout to manage the widgets size and position layout = QtGui.QGridLayout() w.setLayout(layout)", "in middle-left layout.addWidget(listw, 2, 0) # list widget goes in bottom-left layout.addWidget(pltW1, 0,", "app = QtCore.QCoreApplication.instance() if app is None: app = QtGui.QApplication(sys.argv) ## Define a", "'k') ## Create some widgets to be placed inside btn = QtGui.QPushButton('press me')", "## Always start by initializing Qt (only once per application) #app = QtGui.QApplication([])", "3, 1) # plot goes on right side, spanning 3 rows #layout.addWidget(pw2, 3,", "= np.sin(x/10)+noise plt1 = pltW1.plot(x,y, color='g') ## Create a grid layout to manage", "import QtGui, QtCore # (the example applies equally well to PySide) import sys", "np.arange(0,1000,1) noise = np.random.normal(0,1,1000)/1 y = np.sin(x/10)+noise plt1 = pltW1.plot(x,y, color='g') ## Create", "None: app = QtGui.QApplication(sys.argv) ## Define a top-level widget to hold everything w", "1, 3, 1) # plot goes on right side, spanning 3 rows #layout.addWidget(pw2,", "the layout in their proper positions layout.addWidget(btn, 0, 0) # button goes in", "import numpy as np ## Always start by initializing Qt (only once per", "= QtGui.QListWidget() pltW1 = pg.PlotWidget() #pw2 = pg.PlotWidget() x = np.arange(0,1000,1) noise =", "= QtGui.QWidget() w.move(400,100) #w.showFullScreen() # change background color pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') ##", "pg import numpy as np ## Always start by initializing Qt (only once", "bottom-left layout.addWidget(pltW1, 0, 1, 3, 1) # plot goes on right side, spanning", "on right side, spanning 3 rows #layout.addWidget(pw2, 3, 1, 2, 1) ## Display", "well to PySide) import sys import pyqtgraph as pg import numpy as np", "layout = QtGui.QGridLayout() w.setLayout(layout) ## Add widgets to the layout in their proper", "= QtGui.QGridLayout() w.setLayout(layout) ## Add widgets to the layout in their proper positions", "0, 0) # button goes in upper-left layout.addWidget(text, 1, 0) # text edit", "pg.PlotWidget() #pw2 = pg.PlotWidget() x = np.arange(0,1000,1) noise = np.random.normal(0,1,1000)/1 y = np.sin(x/10)+noise", "to be placed inside btn = QtGui.QPushButton('press me') text = QtGui.QLineEdit('enter text') listw", "1, 0) # text edit goes in middle-left layout.addWidget(listw, 2, 0) # list", "#layout.addWidget(pw2, 3, 1, 2, 1) ## Display the widget as a new window", "by initializing Qt (only once per application) #app = QtGui.QApplication([]) app = QtCore.QCoreApplication.instance()", "noise = np.random.normal(0,1,1000)/1 y = np.sin(x/10)+noise plt1 = pltW1.plot(x,y, color='g') ## Create a", "equally well to PySide) import sys import pyqtgraph as pg import numpy as", "applies equally well to PySide) import sys import pyqtgraph as pg import numpy", "Add widgets to the layout in their proper positions layout.addWidget(btn, 0, 0) #", "widgets to the layout in their proper positions layout.addWidget(btn, 0, 0) # button", "## Create some widgets to be placed inside btn = QtGui.QPushButton('press me') text", "w.setLayout(layout) ## Add widgets to the layout in their proper positions layout.addWidget(btn, 0,", "placed inside btn = QtGui.QPushButton('press me') text = QtGui.QLineEdit('enter 
text') listw = QtGui.QListWidget()", "np.sin(x/10)+noise plt1 = pltW1.plot(x,y, color='g') ## Create a grid layout to manage the", "to PySide) import sys import pyqtgraph as pg import numpy as np ##", "hold everything w = QtGui.QWidget() w.move(400,100) #w.showFullScreen() # change background color pg.setConfigOption('background', 'w')", "QtGui.QGridLayout() w.setLayout(layout) ## Add widgets to the layout in their proper positions layout.addWidget(btn,", "position layout = QtGui.QGridLayout() w.setLayout(layout) ## Add widgets to the layout in their", "layout.addWidget(listw, 2, 0) # list widget goes in bottom-left layout.addWidget(pltW1, 0, 1, 3,", "(only once per application) #app = QtGui.QApplication([]) app = QtCore.QCoreApplication.instance() if app is", "to the layout in their proper positions layout.addWidget(btn, 0, 0) # button goes", "initializing Qt (only once per application) #app = QtGui.QApplication([]) app = QtCore.QCoreApplication.instance() if", "per application) #app = QtGui.QApplication([]) app = QtCore.QCoreApplication.instance() if app is None: app", "Define a top-level widget to hold everything w = QtGui.QWidget() w.move(400,100) #w.showFullScreen() #", "as pg import numpy as np ## Always start by initializing Qt (only", "= QtGui.QLineEdit('enter text') listw = QtGui.QListWidget() pltW1 = pg.PlotWidget() #pw2 = pg.PlotWidget() x", "a grid layout to manage the widgets size and position layout = QtGui.QGridLayout()", "pg.PlotWidget() x = np.arange(0,1000,1) noise = np.random.normal(0,1,1000)/1 y = np.sin(x/10)+noise plt1 = pltW1.plot(x,y,", "y = np.sin(x/10)+noise plt1 = pltW1.plot(x,y, color='g') ## Create a grid layout to", "QtGui.QApplication([]) app = QtCore.QCoreApplication.instance() if app is None: app = QtGui.QApplication(sys.argv) ## Define", "0) # text edit goes in middle-left layout.addWidget(listw, 2, 0) # list widget", "app = QtGui.QApplication(sys.argv) ## Define a top-level widget to hold everything w =", "QtCore.QCoreApplication.instance() if app is None: app = QtGui.QApplication(sys.argv) ## Define a top-level widget", "1) ## Display the widget as a new window w.show() ## Start the", "background color pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') ## Create some widgets to be placed", "from PyQt5 import QtGui, QtCore # (the example applies equally well to PySide)", "np.random.normal(0,1,1000)/1 y = np.sin(x/10)+noise plt1 = pltW1.plot(x,y, color='g') ## Create a grid layout", "positions layout.addWidget(btn, 0, 0) # button goes in upper-left layout.addWidget(text, 1, 0) #", "middle-left layout.addWidget(listw, 2, 0) # list widget goes in bottom-left layout.addWidget(pltW1, 0, 1,", "goes in bottom-left layout.addWidget(pltW1, 0, 1, 3, 1) # plot goes on right", "layout in their proper positions layout.addWidget(btn, 0, 0) # button goes in upper-left", "and position layout = QtGui.QGridLayout() w.setLayout(layout) ## Add widgets to the layout in", "widget as a new window w.show() ## Start the Qt event loop app.exec_()", "# change background color pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') ## Create some widgets to", "numpy as np ## Always start by initializing Qt (only once per application)", "w = QtGui.QWidget() w.move(400,100) #w.showFullScreen() # change background color pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k')", "listw = QtGui.QListWidget() pltW1 = pg.PlotWidget() #pw2 = pg.PlotWidget() x = np.arange(0,1000,1) noise", "list widget goes in bottom-left 
layout.addWidget(pltW1, 0, 1, 3, 1) # plot goes", "QtGui.QLineEdit('enter text') listw = QtGui.QListWidget() pltW1 = pg.PlotWidget() #pw2 = pg.PlotWidget() x =", "QtGui.QWidget() w.move(400,100) #w.showFullScreen() # change background color pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') ## Create", "size and position layout = QtGui.QGridLayout() w.setLayout(layout) ## Add widgets to the layout", "example applies equally well to PySide) import sys import pyqtgraph as pg import", "0) # button goes in upper-left layout.addWidget(text, 1, 0) # text edit goes", "rows #layout.addWidget(pw2, 3, 1, 2, 1) ## Display the widget as a new", "0) # list widget goes in bottom-left layout.addWidget(pltW1, 0, 1, 3, 1) #", "= pltW1.plot(x,y, color='g') ## Create a grid layout to manage the widgets size", "layout to manage the widgets size and position layout = QtGui.QGridLayout() w.setLayout(layout) ##", "import sys import pyqtgraph as pg import numpy as np ## Always start", "in their proper positions layout.addWidget(btn, 0, 0) # button goes in upper-left layout.addWidget(text,", "spanning 3 rows #layout.addWidget(pw2, 3, 1, 2, 1) ## Display the widget as", "# text edit goes in middle-left layout.addWidget(listw, 2, 0) # list widget goes", "1, 2, 1) ## Display the widget as a new window w.show() ##", "QtGui.QPushButton('press me') text = QtGui.QLineEdit('enter text') listw = QtGui.QListWidget() pltW1 = pg.PlotWidget() #pw2", "once per application) #app = QtGui.QApplication([]) app = QtCore.QCoreApplication.instance() if app is None:", "2, 0) # list widget goes in bottom-left layout.addWidget(pltW1, 0, 1, 3, 1)", "side, spanning 3 rows #layout.addWidget(pw2, 3, 1, 2, 1) ## Display the widget", "QtGui.QApplication(sys.argv) ## Define a top-level widget to hold everything w = QtGui.QWidget() w.move(400,100)", "layout.addWidget(text, 1, 0) # text edit goes in middle-left layout.addWidget(listw, 2, 0) #", "proper positions layout.addWidget(btn, 0, 0) # button goes in upper-left layout.addWidget(text, 1, 0)", "3 rows #layout.addWidget(pw2, 3, 1, 2, 1) ## Display the widget as a", "np ## Always start by initializing Qt (only once per application) #app =", "= QtGui.QApplication(sys.argv) ## Define a top-level widget to hold everything w = QtGui.QWidget()", "plot goes on right side, spanning 3 rows #layout.addWidget(pw2, 3, 1, 2, 1)", "be placed inside btn = QtGui.QPushButton('press me') text = QtGui.QLineEdit('enter text') listw =", "pyqtgraph as pg import numpy as np ## Always start by initializing Qt", "layout.addWidget(btn, 0, 0) # button goes in upper-left layout.addWidget(text, 1, 0) # text", "a top-level widget to hold everything w = QtGui.QWidget() w.move(400,100) #w.showFullScreen() # change", "# button goes in upper-left layout.addWidget(text, 1, 0) # text edit goes in", "= np.arange(0,1000,1) noise = np.random.normal(0,1,1000)/1 y = np.sin(x/10)+noise plt1 = pltW1.plot(x,y, color='g') ##", "= QtCore.QCoreApplication.instance() if app is None: app = QtGui.QApplication(sys.argv) ## Define a top-level", "widget goes in bottom-left layout.addWidget(pltW1, 0, 1, 3, 1) # plot goes on", "#w.showFullScreen() # change background color pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') ## Create some widgets", "goes on right side, spanning 3 rows #layout.addWidget(pw2, 3, 1, 2, 1) ##", "## Display the widget as a new window w.show() ## Start the Qt", "manage the widgets size and position layout = QtGui.QGridLayout() w.setLayout(layout) ## Add widgets", 
"<reponame>apokhr/PumpProbe-analysis from PyQt5 import QtGui, QtCore # (the example applies equally well to", "inside btn = QtGui.QPushButton('press me') text = QtGui.QLineEdit('enter text') listw = QtGui.QListWidget() pltW1", "app is None: app = QtGui.QApplication(sys.argv) ## Define a top-level widget to hold", "0, 1, 3, 1) # plot goes on right side, spanning 3 rows", "= QtGui.QApplication([]) app = QtCore.QCoreApplication.instance() if app is None: app = QtGui.QApplication(sys.argv) ##", "as np ## Always start by initializing Qt (only once per application) #app", "text edit goes in middle-left layout.addWidget(listw, 2, 0) # list widget goes in", "PyQt5 import QtGui, QtCore # (the example applies equally well to PySide) import", "start by initializing Qt (only once per application) #app = QtGui.QApplication([]) app =", "# (the example applies equally well to PySide) import sys import pyqtgraph as", "#pw2 = pg.PlotWidget() x = np.arange(0,1000,1) noise = np.random.normal(0,1,1000)/1 y = np.sin(x/10)+noise plt1", "w.move(400,100) #w.showFullScreen() # change background color pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') ## Create some", "Create a grid layout to manage the widgets size and position layout =", "the widgets size and position layout = QtGui.QGridLayout() w.setLayout(layout) ## Add widgets to", "some widgets to be placed inside btn = QtGui.QPushButton('press me') text = QtGui.QLineEdit('enter", "2, 1) ## Display the widget as a new window w.show() ## Start", "text') listw = QtGui.QListWidget() pltW1 = pg.PlotWidget() #pw2 = pg.PlotWidget() x = np.arange(0,1000,1)", "QtCore # (the example applies equally well to PySide) import sys import pyqtgraph", "top-level widget to hold everything w = QtGui.QWidget() w.move(400,100) #w.showFullScreen() # change background", "edit goes in middle-left layout.addWidget(listw, 2, 0) # list widget goes in bottom-left", "# list widget goes in bottom-left layout.addWidget(pltW1, 0, 1, 3, 1) # plot", "pltW1.plot(x,y, color='g') ## Create a grid layout to manage the widgets size and", "3, 1, 2, 1) ## Display the widget as a new window w.show()", "## Define a top-level widget to hold everything w = QtGui.QWidget() w.move(400,100) #w.showFullScreen()", "layout.addWidget(pltW1, 0, 1, 3, 1) # plot goes on right side, spanning 3", "to hold everything w = QtGui.QWidget() w.move(400,100) #w.showFullScreen() # change background color pg.setConfigOption('background',", "= pg.PlotWidget() x = np.arange(0,1000,1) noise = np.random.normal(0,1,1000)/1 y = np.sin(x/10)+noise plt1 =", "widgets to be placed inside btn = QtGui.QPushButton('press me') text = QtGui.QLineEdit('enter text')", "color='g') ## Create a grid layout to manage the widgets size and position", "the widget as a new window w.show() ## Start the Qt event loop", "everything w = QtGui.QWidget() w.move(400,100) #w.showFullScreen() # change background color pg.setConfigOption('background', 'w') pg.setConfigOption('foreground',", "in bottom-left layout.addWidget(pltW1, 0, 1, 3, 1) # plot goes on right side,", "in upper-left layout.addWidget(text, 1, 0) # text edit goes in middle-left layout.addWidget(listw, 2,", "right side, spanning 3 rows #layout.addWidget(pw2, 3, 1, 2, 1) ## Display the", "Display the widget as a new window w.show() ## Start the Qt event", "= np.random.normal(0,1,1000)/1 y = np.sin(x/10)+noise plt1 = pltW1.plot(x,y, color='g') ## Create a grid", "change background color pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') ## 
Create some widgets to be", "= pg.PlotWidget() #pw2 = pg.PlotWidget() x = np.arange(0,1000,1) noise = np.random.normal(0,1,1000)/1 y =", "Qt (only once per application) #app = QtGui.QApplication([]) app = QtCore.QCoreApplication.instance() if app", "pltW1 = pg.PlotWidget() #pw2 = pg.PlotWidget() x = np.arange(0,1000,1) noise = np.random.normal(0,1,1000)/1 y", "their proper positions layout.addWidget(btn, 0, 0) # button goes in upper-left layout.addWidget(text, 1,", "application) #app = QtGui.QApplication([]) app = QtCore.QCoreApplication.instance() if app is None: app =", "sys import pyqtgraph as pg import numpy as np ## Always start by", "btn = QtGui.QPushButton('press me') text = QtGui.QLineEdit('enter text') listw = QtGui.QListWidget() pltW1 =", "PySide) import sys import pyqtgraph as pg import numpy as np ## Always", "'w') pg.setConfigOption('foreground', 'k') ## Create some widgets to be placed inside btn =", "## Create a grid layout to manage the widgets size and position layout", "#app = QtGui.QApplication([]) app = QtCore.QCoreApplication.instance() if app is None: app = QtGui.QApplication(sys.argv)", "goes in middle-left layout.addWidget(listw, 2, 0) # list widget goes in bottom-left layout.addWidget(pltW1,", "= QtGui.QPushButton('press me') text = QtGui.QLineEdit('enter text') listw = QtGui.QListWidget() pltW1 = pg.PlotWidget()", "me') text = QtGui.QLineEdit('enter text') listw = QtGui.QListWidget() pltW1 = pg.PlotWidget() #pw2 =", "## Add widgets to the layout in their proper positions layout.addWidget(btn, 0, 0)", "1) # plot goes on right side, spanning 3 rows #layout.addWidget(pw2, 3, 1,", "widgets size and position layout = QtGui.QGridLayout() w.setLayout(layout) ## Add widgets to the", "is None: app = QtGui.QApplication(sys.argv) ## Define a top-level widget to hold everything", "x = np.arange(0,1000,1) noise = np.random.normal(0,1,1000)/1 y = np.sin(x/10)+noise plt1 = pltW1.plot(x,y, color='g')", "pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') ## Create some widgets to be placed inside btn", "button goes in upper-left layout.addWidget(text, 1, 0) # text edit goes in middle-left", "QtGui.QListWidget() pltW1 = pg.PlotWidget() #pw2 = pg.PlotWidget() x = np.arange(0,1000,1) noise = np.random.normal(0,1,1000)/1", "text = QtGui.QLineEdit('enter text') listw = QtGui.QListWidget() pltW1 = pg.PlotWidget() #pw2 = pg.PlotWidget()", "# plot goes on right side, spanning 3 rows #layout.addWidget(pw2, 3, 1, 2,", "goes in upper-left layout.addWidget(text, 1, 0) # text edit goes in middle-left layout.addWidget(listw,", "(the example applies equally well to PySide) import sys import pyqtgraph as pg", "color pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') ## Create some widgets to be placed inside", "pg.setConfigOption('foreground', 'k') ## Create some widgets to be placed inside btn = QtGui.QPushButton('press", "plt1 = pltW1.plot(x,y, color='g') ## Create a grid layout to manage the widgets", "upper-left layout.addWidget(text, 1, 0) # text edit goes in middle-left layout.addWidget(listw, 2, 0)", "Always start by initializing Qt (only once per application) #app = QtGui.QApplication([]) app", "Create some widgets to be placed inside btn = QtGui.QPushButton('press me') text =", "to manage the widgets size and position layout = QtGui.QGridLayout() w.setLayout(layout) ## Add", "if app is None: app = QtGui.QApplication(sys.argv) ## Define a top-level widget to" ]
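The button is placed in the layout but never connected to anything; a minimal sketch wiring it to a slot that copies the line edit into the list widget (the handler name is illustrative, and the connection must be made before app.exec_()):

def on_press():
    # Append whatever is typed in the line edit to the list widget.
    listw.addItem(text.text())

btn.clicked.connect(on_press)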
[ "= Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']], reader) algo = NormalPredictor() # perf = cross_validate(algo, data,", "\"\"\" from surprise import NormalPredictor from surprise import Dataset, Reader from surprise.model_selection import", "Dataset, Reader from surprise.model_selection import cross_validate import pandas as pd if __name__ ==", "@file: rs.py @time: 2019/3/16 18:32 采用无监督的方式做, 通过估算正态分布 \"\"\" from surprise import NormalPredictor from", "data = Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']], reader) algo = NormalPredictor() # perf = cross_validate(algo,", "# print(line[1], line[23]) pred = algo.predict(line[1], line[23], r_ui=4, verbose=True) print(type(pred.est), pred.est) r.append(round(pred.est)) traindf['r']", "== '__main__': traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns) # A reader is still needed but", "@contact:<EMAIL> @file: rs.py @time: 2019/3/16 18:32 采用无监督的方式做, 通过估算正态分布 \"\"\" from surprise import NormalPredictor", "A reader is still needed but only the rating_scale param is requiered. reader", "id, item id and ratings (in that order). data = Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']],", "'MAE'], verbose=True) trainset = data.build_full_trainset() algo.fit(trainset) r = [] for line in traindf.itertuples():", "The columns must correspond to user id, item id and ratings (in that", "719)) # The columns must correspond to user id, item id and ratings", "通过估算正态分布 \"\"\" from surprise import NormalPredictor from surprise import Dataset, Reader from surprise.model_selection", "is requiered. reader = Reader(rating_scale=(422, 719)) # The columns must correspond to user", "pred = algo.predict(line[1], line[23], r_ui=4, verbose=True) print(type(pred.est), pred.est) r.append(round(pred.est)) traindf['r'] = r traindf.to_csv('D:/Desktop/DF/portrait/train_dataset1.csv')", "that order). data = Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']], reader) algo = NormalPredictor() # perf", "import pandas as pd if __name__ == '__main__': traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns) #", "import NormalPredictor from surprise import Dataset, Reader from surprise.model_selection import cross_validate import pandas", "__name__ == '__main__': traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns) # A reader is still needed", "order). data = Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']], reader) algo = NormalPredictor() # perf =", "as pd if __name__ == '__main__': traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns) # A reader", "and ratings (in that order). data = Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']], reader) algo =", "requiered. reader = Reader(rating_scale=(422, 719)) # The columns must correspond to user id,", "but only the rating_scale param is requiered. reader = Reader(rating_scale=(422, 719)) # The", "= NormalPredictor() # perf = cross_validate(algo, data, cv=5, measures=['RMSE', 'MAE'], verbose=True) trainset =", "# perf = cross_validate(algo, data, cv=5, measures=['RMSE', 'MAE'], verbose=True) trainset = data.build_full_trainset() algo.fit(trainset)", "still needed but only the rating_scale param is requiered. 
reader = Reader(rating_scale=(422, 719))", "for line in traindf.itertuples(): # print(line[1], line[23]) pred = algo.predict(line[1], line[23], r_ui=4, verbose=True)", "from surprise import Dataset, Reader from surprise.model_selection import cross_validate import pandas as pd", "trainset = data.build_full_trainset() algo.fit(trainset) r = [] for line in traindf.itertuples(): # print(line[1],", "id and ratings (in that order). data = Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']], reader) algo", "must correspond to user id, item id and ratings (in that order). data", "= Reader(rating_scale=(422, 719)) # The columns must correspond to user id, item id", "2019/3/16 18:32 采用无监督的方式做, 通过估算正态分布 \"\"\" from surprise import NormalPredictor from surprise import Dataset,", "import Dataset, Reader from surprise.model_selection import cross_validate import pandas as pd if __name__", "measures=['RMSE', 'MAE'], verbose=True) trainset = data.build_full_trainset() algo.fit(trainset) r = [] for line in", "[] for line in traindf.itertuples(): # print(line[1], line[23]) pred = algo.predict(line[1], line[23], r_ui=4,", "algo.fit(trainset) r = [] for line in traindf.itertuples(): # print(line[1], line[23]) pred =", "r = [] for line in traindf.itertuples(): # print(line[1], line[23]) pred = algo.predict(line[1],", "'信用分']], reader) algo = NormalPredictor() # perf = cross_validate(algo, data, cv=5, measures=['RMSE', 'MAE'],", "traindf.itertuples(): # print(line[1], line[23]) pred = algo.predict(line[1], line[23], r_ui=4, verbose=True) print(type(pred.est), pred.est) r.append(round(pred.est))", "'当月网购类应用使用次数', '信用分']], reader) algo = NormalPredictor() # perf = cross_validate(algo, data, cv=5, measures=['RMSE',", "perf = cross_validate(algo, data, cv=5, measures=['RMSE', 'MAE'], verbose=True) trainset = data.build_full_trainset() algo.fit(trainset) r", "reader = Reader(rating_scale=(422, 719)) # The columns must correspond to user id, item", "rs.py @time: 2019/3/16 18:32 采用无监督的方式做, 通过估算正态分布 \"\"\" from surprise import NormalPredictor from surprise", "Ian @contact:<EMAIL> @file: rs.py @time: 2019/3/16 18:32 采用无监督的方式做, 通过估算正态分布 \"\"\" from surprise import", "data.build_full_trainset() algo.fit(trainset) r = [] for line in traindf.itertuples(): # print(line[1], line[23]) pred", "reader is still needed but only the rating_scale param is requiered. reader =", "cross_validate(algo, data, cv=5, measures=['RMSE', 'MAE'], verbose=True) trainset = data.build_full_trainset() algo.fit(trainset) r = []", "surprise import Dataset, Reader from surprise.model_selection import cross_validate import pandas as pd if", "to user id, item id and ratings (in that order). data = Dataset.load_from_df(traindf[['用户编码',", "reader) algo = NormalPredictor() # perf = cross_validate(algo, data, cv=5, measures=['RMSE', 'MAE'], verbose=True)", "18:32 采用无监督的方式做, 通过估算正态分布 \"\"\" from surprise import NormalPredictor from surprise import Dataset, Reader", "only the rating_scale param is requiered. reader = Reader(rating_scale=(422, 719)) # The columns", "pandas as pd if __name__ == '__main__': traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns) # A", "is still needed but only the rating_scale param is requiered. 
reader = Reader(rating_scale=(422,", "NormalPredictor() # perf = cross_validate(algo, data, cv=5, measures=['RMSE', 'MAE'], verbose=True) trainset = data.build_full_trainset()", "traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns) # A reader is still needed but only the", "@time: 2019/3/16 18:32 采用无监督的方式做, 通过估算正态分布 \"\"\" from surprise import NormalPredictor from surprise import", "pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns) # A reader is still needed but only the rating_scale param", "item id and ratings (in that order). data = Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']], reader)", "= pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns) # A reader is still needed but only the rating_scale", "rating_scale param is requiered. reader = Reader(rating_scale=(422, 719)) # The columns must correspond", "in traindf.itertuples(): # print(line[1], line[23]) pred = algo.predict(line[1], line[23], r_ui=4, verbose=True) print(type(pred.est), pred.est)", "# A reader is still needed but only the rating_scale param is requiered.", "the rating_scale param is requiered. reader = Reader(rating_scale=(422, 719)) # The columns must", "print(line[1], line[23]) pred = algo.predict(line[1], line[23], r_ui=4, verbose=True) print(type(pred.est), pred.est) r.append(round(pred.est)) traindf['r'] =", "'__main__': traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns) # A reader is still needed but only", "Reader from surprise.model_selection import cross_validate import pandas as pd if __name__ == '__main__':", "line[23]) pred = algo.predict(line[1], line[23], r_ui=4, verbose=True) print(type(pred.est), pred.est) r.append(round(pred.est)) traindf['r'] = r", "Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']], reader) algo = NormalPredictor() # perf = cross_validate(algo, data, cv=5,", "verbose=True) trainset = data.build_full_trainset() algo.fit(trainset) r = [] for line in traindf.itertuples(): #", "line in traindf.itertuples(): # print(line[1], line[23]) pred = algo.predict(line[1], line[23], r_ui=4, verbose=True) print(type(pred.est),", "print(traindf.columns) # A reader is still needed but only the rating_scale param is", "cross_validate import pandas as pd if __name__ == '__main__': traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns)", "needed but only the rating_scale param is requiered. reader = Reader(rating_scale=(422, 719)) #", "(in that order). data = Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']], reader) algo = NormalPredictor() #", "cv=5, measures=['RMSE', 'MAE'], verbose=True) trainset = data.build_full_trainset() algo.fit(trainset) r = [] for line", "param is requiered. reader = Reader(rating_scale=(422, 719)) # The columns must correspond to", "correspond to user id, item id and ratings (in that order). 
data =", "\"\"\" @author: Ian @contact:<EMAIL> @file: rs.py @time: 2019/3/16 18:32 采用无监督的方式做, 通过估算正态分布 \"\"\" from", "= [] for line in traindf.itertuples(): # print(line[1], line[23]) pred = algo.predict(line[1], line[23],", "if __name__ == '__main__': traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns) # A reader is still", "surprise import NormalPredictor from surprise import Dataset, Reader from surprise.model_selection import cross_validate import", "data, cv=5, measures=['RMSE', 'MAE'], verbose=True) trainset = data.build_full_trainset() algo.fit(trainset) r = [] for", "采用无监督的方式做, 通过估算正态分布 \"\"\" from surprise import NormalPredictor from surprise import Dataset, Reader from", "<filename>apps/consumer_portrait/rs.py #!/usr/bin/python # encoding: utf-8 \"\"\" @author: Ian @contact:<EMAIL> @file: rs.py @time: 2019/3/16", "#!/usr/bin/python # encoding: utf-8 \"\"\" @author: Ian @contact:<EMAIL> @file: rs.py @time: 2019/3/16 18:32", "user id, item id and ratings (in that order). data = Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数',", "pd if __name__ == '__main__': traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv') print(traindf.columns) # A reader is", "Reader(rating_scale=(422, 719)) # The columns must correspond to user id, item id and", "import cross_validate import pandas as pd if __name__ == '__main__': traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv')", "algo = NormalPredictor() # perf = cross_validate(algo, data, cv=5, measures=['RMSE', 'MAE'], verbose=True) trainset", "= data.build_full_trainset() algo.fit(trainset) r = [] for line in traindf.itertuples(): # print(line[1], line[23])", "@author: Ian @contact:<EMAIL> @file: rs.py @time: 2019/3/16 18:32 采用无监督的方式做, 通过估算正态分布 \"\"\" from surprise", "columns must correspond to user id, item id and ratings (in that order).", "from surprise.model_selection import cross_validate import pandas as pd if __name__ == '__main__': traindf", "encoding: utf-8 \"\"\" @author: Ian @contact:<EMAIL> @file: rs.py @time: 2019/3/16 18:32 采用无监督的方式做, 通过估算正态分布", "ratings (in that order). data = Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']], reader) algo = NormalPredictor()", "= cross_validate(algo, data, cv=5, measures=['RMSE', 'MAE'], verbose=True) trainset = data.build_full_trainset() algo.fit(trainset) r =", "utf-8 \"\"\" @author: Ian @contact:<EMAIL> @file: rs.py @time: 2019/3/16 18:32 采用无监督的方式做, 通过估算正态分布 \"\"\"", "from surprise import NormalPredictor from surprise import Dataset, Reader from surprise.model_selection import cross_validate", "# The columns must correspond to user id, item id and ratings (in", "surprise.model_selection import cross_validate import pandas as pd if __name__ == '__main__': traindf =", "# encoding: utf-8 \"\"\" @author: Ian @contact:<EMAIL> @file: rs.py @time: 2019/3/16 18:32 采用无监督的方式做,", "NormalPredictor from surprise import Dataset, Reader from surprise.model_selection import cross_validate import pandas as" ]
[ "( (\"draft\", \"Draft\"), (\"published\", \"Published\"), ) id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) question_text =", "timezone from django.utils.translation import ugettext_lazy as _ from taggit.managers import TaggableManager from taggit.models", "models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) choice_text = models.CharField(max_length=200) votes = models.IntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True) question", "unique_for_date='pub_date') pub_date = models.DateTimeField('date published', default=timezone.now) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) status", "class PublishedManager(models.Manager): def get_queryset(self): return super(PublishedManager, self).get_queryset().filter(status=\"published\") class Question(models.Model): STATUS_CHOICES = ( (\"draft\",", "django.urls import reverse from django.utils import timezone from django.utils.translation import ugettext_lazy as _", "models.DateTimeField(auto_now_add=True) question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices') def __str__(self): return self.choice_text def get_absolute_url(self): return", "choice_text = models.CharField(max_length=200) votes = models.IntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True) question = models.ForeignKey(Question, on_delete=models.CASCADE,", "class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase): class Meta: verbose_name = _(\"Tag\") verbose_name_plural = _(\"Tags\") class PublishedManager(models.Manager):", "UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase): class Meta: verbose_name = _(\"Tag\") verbose_name_plural = _(\"Tags\") class PublishedManager(models.Manager): def", "Meta: ordering = ('-pub_date',) def __str__(self): return self.question_text def get_absolute_url(self): return reverse('polls:question_detail', args=[self.id])", "Question(models.Model): STATUS_CHOICES = ( (\"draft\", \"Draft\"), (\"published\", \"Published\"), ) id = models.UUIDField(primary_key=True, default=uuid.uuid4,", "return reverse('polls:question_update', args=[self.id]) def get_delete_url(self): return reverse('polls:question_delete', args=[self.id]) def can_update(self, user): return user.is_superuser", "user.is_superuser or self.created_by == user def can_delete(self, user): return user.is_superuser or self.created_by ==", "user): return user.is_superuser or self.created_by == user class Choice(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4,", "- datetime.timedelta(days=1) def get_update_url(self): return reverse('polls:question_update', args=[self.id]) def get_delete_url(self): return reverse('polls:question_delete', args=[self.id]) def", "get_update_url(self): return reverse('polls:question_update', args=[self.id]) def get_delete_url(self): return reverse('polls:question_delete', args=[self.id]) def can_update(self, user): return", "= models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) choice_text = models.CharField(max_length=200) votes = models.IntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True)", "= models.CharField(max_length=10, choices=STATUS_CHOICES, default=\"draft\") created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE) objects = models.Manager() published =", "default=\"draft\") created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE) objects = models.Manager() 
published = PublishedManager() tags =", "reverse('polls:question_delete', args=[self.id]) def can_update(self, user): return user.is_superuser or self.created_by == user def can_delete(self,", "created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE) objects = models.Manager() published = PublishedManager() tags = TaggableManager(through=UUIDTaggedItem)", "from taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase): class Meta: verbose_name = _(\"Tag\")", "class Meta: verbose_name = _(\"Tag\") verbose_name_plural = _(\"Tags\") class PublishedManager(models.Manager): def get_queryset(self): return", "choices=STATUS_CHOICES, default=\"draft\") created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE) objects = models.Manager() published = PublishedManager() tags", "reverse('polls:question_detail', args=[self.id]) def was_published_recently(self): return self.pub_date >= timezone.now() - datetime.timedelta(days=1) def get_update_url(self): return", "taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase): class Meta: verbose_name = _(\"Tag\") verbose_name_plural", "return reverse('polls:question_detail', args=[self.id]) def was_published_recently(self): return self.pub_date >= timezone.now() - datetime.timedelta(days=1) def get_update_url(self):", "== user def can_delete(self, user): return user.is_superuser or self.created_by == user class Choice(models.Model):", "default=timezone.now) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) status = models.CharField(max_length=10, choices=STATUS_CHOICES, default=\"draft\") created_by", "import ugettext_lazy as _ from taggit.managers import TaggableManager from taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase", "= _(\"Tags\") class PublishedManager(models.Manager): def get_queryset(self): return super(PublishedManager, self).get_queryset().filter(status=\"published\") class Question(models.Model): STATUS_CHOICES =", "PublishedManager(models.Manager): def get_queryset(self): return super(PublishedManager, self).get_queryset().filter(status=\"published\") class Question(models.Model): STATUS_CHOICES = ( (\"draft\", \"Draft\"),", "self).get_queryset().filter(status=\"published\") class Question(models.Model): STATUS_CHOICES = ( (\"draft\", \"Draft\"), (\"published\", \"Published\"), ) id =", "Choice(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) choice_text = models.CharField(max_length=200) votes = models.IntegerField(default=0) created_at", "= ('-pub_date',) def __str__(self): return self.question_text def get_absolute_url(self): return reverse('polls:question_detail', args=[self.id]) def was_published_recently(self):", "class Meta: ordering = ('-pub_date',) def __str__(self): return self.question_text def get_absolute_url(self): return reverse('polls:question_detail',", "TaggedItemBase): class Meta: verbose_name = _(\"Tag\") verbose_name_plural = _(\"Tags\") class PublishedManager(models.Manager): def get_queryset(self):", "reverse from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from taggit.managers", "verbose_name = _(\"Tag\") verbose_name_plural = _(\"Tags\") class PublishedManager(models.Manager): def get_queryset(self): return super(PublishedManager, self).get_queryset().filter(status=\"published\")", "user class 
Choice(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) choice_text = models.CharField(max_length=200) votes =", "def __str__(self): return self.question_text def get_absolute_url(self): return reverse('polls:question_detail', args=[self.id]) def was_published_recently(self): return self.pub_date", "or self.created_by == user class Choice(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) choice_text =", "timezone.now() - datetime.timedelta(days=1) def get_update_url(self): return reverse('polls:question_update', args=[self.id]) def get_delete_url(self): return reverse('polls:question_delete', args=[self.id])", "created_at = models.DateTimeField(auto_now_add=True) question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices') def __str__(self): return self.choice_text def", "get_absolute_url(self): return reverse('polls:question_detail', args=[self.id]) def was_published_recently(self): return self.pub_date >= timezone.now() - datetime.timedelta(days=1) def", "updated_at = models.DateTimeField(auto_now=True) status = models.CharField(max_length=10, choices=STATUS_CHOICES, default=\"draft\") created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE) objects", "editable=False) question_text = models.CharField(max_length=200) slug = models.SlugField(max_length=250, unique_for_date='pub_date') pub_date = models.DateTimeField('date published', default=timezone.now)", "from django.utils.translation import ugettext_lazy as _ from taggit.managers import TaggableManager from taggit.models import", "get_delete_url(self): return reverse('polls:question_delete', args=[self.id]) def can_update(self, user): return user.is_superuser or self.created_by == user", "return user.is_superuser or self.created_by == user class Choice(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)", "id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) choice_text = models.CharField(max_length=200) votes = models.IntegerField(default=0) created_at =", "votes = models.IntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True) question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices') def __str__(self):", ") id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) question_text = models.CharField(max_length=200) slug = models.SlugField(max_length=250, unique_for_date='pub_date')", "models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) status = models.CharField(max_length=10, choices=STATUS_CHOICES, default=\"draft\") created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)", "= models.DateTimeField('date published', default=timezone.now) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) status = models.CharField(max_length=10,", "args=[self.id]) def get_delete_url(self): return reverse('polls:question_delete', args=[self.id]) def can_update(self, user): return user.is_superuser or self.created_by", "self.created_by == user def can_delete(self, user): return user.is_superuser or self.created_by == user class", "default=uuid.uuid4, editable=False) question_text = models.CharField(max_length=200) slug = models.SlugField(max_length=250, unique_for_date='pub_date') pub_date = models.DateTimeField('date published',", "= models.DateTimeField(auto_now=True) status = 
models.CharField(max_length=10, choices=STATUS_CHOICES, default=\"draft\") created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE) objects =", "GenericUUIDTaggedItemBase, TaggedItemBase class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase): class Meta: verbose_name = _(\"Tag\") verbose_name_plural = _(\"Tags\")", "import get_user_model from django.db import models from django.urls import reverse from django.utils import", "can_update(self, user): return user.is_superuser or self.created_by == user def can_delete(self, user): return user.is_superuser", "user): return user.is_superuser or self.created_by == user def can_delete(self, user): return user.is_superuser or", "user def can_delete(self, user): return user.is_superuser or self.created_by == user class Choice(models.Model): id", "import TaggableManager from taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase): class Meta: verbose_name", "django.contrib.auth import get_user_model from django.db import models from django.urls import reverse from django.utils", "from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from taggit.managers import", "= ( (\"draft\", \"Draft\"), (\"published\", \"Published\"), ) id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) question_text", "def can_update(self, user): return user.is_superuser or self.created_by == user def can_delete(self, user): return", "TaggableManager(through=UUIDTaggedItem) class Meta: ordering = ('-pub_date',) def __str__(self): return self.question_text def get_absolute_url(self): return", "= models.ForeignKey(get_user_model(), on_delete=models.CASCADE) objects = models.Manager() published = PublishedManager() tags = TaggableManager(through=UUIDTaggedItem) class", "_(\"Tags\") class PublishedManager(models.Manager): def get_queryset(self): return super(PublishedManager, self).get_queryset().filter(status=\"published\") class Question(models.Model): STATUS_CHOICES = (", "published', default=timezone.now) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) status = models.CharField(max_length=10, choices=STATUS_CHOICES, default=\"draft\")", "import datetime import uuid from django.contrib.auth import get_user_model from django.db import models from", "verbose_name_plural = _(\"Tags\") class PublishedManager(models.Manager): def get_queryset(self): return super(PublishedManager, self).get_queryset().filter(status=\"published\") class Question(models.Model): STATUS_CHOICES", "ordering = ('-pub_date',) def __str__(self): return self.question_text def get_absolute_url(self): return reverse('polls:question_detail', args=[self.id]) def", "models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) question_text = models.CharField(max_length=200) slug = models.SlugField(max_length=250, unique_for_date='pub_date') pub_date = models.DateTimeField('date", "taggit.managers import TaggableManager from taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase): class Meta:", "import reverse from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from", "or self.created_by == user def can_delete(self, user): return user.is_superuser or self.created_by == user", "get_queryset(self): return super(PublishedManager, self).get_queryset().filter(status=\"published\") class 
Question(models.Model): STATUS_CHOICES = ( (\"draft\", \"Draft\"), (\"published\", \"Published\"),", "models.IntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True) question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices') def __str__(self): return self.choice_text", "self.pub_date >= timezone.now() - datetime.timedelta(days=1) def get_update_url(self): return reverse('polls:question_update', args=[self.id]) def get_delete_url(self): return", "= models.DateTimeField(auto_now_add=True) question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices') def __str__(self): return self.choice_text def get_absolute_url(self):", "default=uuid.uuid4, editable=False) choice_text = models.CharField(max_length=200) votes = models.IntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True) question =", "pub_date = models.DateTimeField('date published', default=timezone.now) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) status =", "def get_update_url(self): return reverse('polls:question_update', args=[self.id]) def get_delete_url(self): return reverse('polls:question_delete', args=[self.id]) def can_update(self, user):", "can_delete(self, user): return user.is_superuser or self.created_by == user class Choice(models.Model): id = models.UUIDField(primary_key=True,", "import uuid from django.contrib.auth import get_user_model from django.db import models from django.urls import", "as _ from taggit.managers import TaggableManager from taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase class UUIDTaggedItem(GenericUUIDTaggedItemBase,", "datetime.timedelta(days=1) def get_update_url(self): return reverse('polls:question_update', args=[self.id]) def get_delete_url(self): return reverse('polls:question_delete', args=[self.id]) def can_update(self,", "import models from django.urls import reverse from django.utils import timezone from django.utils.translation import", "= PublishedManager() tags = TaggableManager(through=UUIDTaggedItem) class Meta: ordering = ('-pub_date',) def __str__(self): return", "question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices') def __str__(self): return self.choice_text def get_absolute_url(self): return reverse('choice_detail',", "id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) question_text = models.CharField(max_length=200) slug = models.SlugField(max_length=250, unique_for_date='pub_date') pub_date", "models.CharField(max_length=200) votes = models.IntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True) question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices') def", "ugettext_lazy as _ from taggit.managers import TaggableManager from taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase class", "models.DateTimeField(auto_now=True) status = models.CharField(max_length=10, choices=STATUS_CHOICES, default=\"draft\") created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE) objects = models.Manager()", "import timezone from django.utils.translation import ugettext_lazy as _ from taggit.managers import TaggableManager from", "__str__(self): return self.question_text def get_absolute_url(self): return reverse('polls:question_detail', args=[self.id]) def was_published_recently(self): return self.pub_date >=", "datetime import uuid from django.contrib.auth import get_user_model from django.db 
import models from django.urls", "super(PublishedManager, self).get_queryset().filter(status=\"published\") class Question(models.Model): STATUS_CHOICES = ( (\"draft\", \"Draft\"), (\"published\", \"Published\"), ) id", "question_text = models.CharField(max_length=200) slug = models.SlugField(max_length=250, unique_for_date='pub_date') pub_date = models.DateTimeField('date published', default=timezone.now) created_at", "models.CharField(max_length=10, choices=STATUS_CHOICES, default=\"draft\") created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE) objects = models.Manager() published = PublishedManager()", "def can_delete(self, user): return user.is_superuser or self.created_by == user class Choice(models.Model): id =", "def get_absolute_url(self): return reverse('polls:question_detail', args=[self.id]) def was_published_recently(self): return self.pub_date >= timezone.now() - datetime.timedelta(days=1)", "def get_queryset(self): return super(PublishedManager, self).get_queryset().filter(status=\"published\") class Question(models.Model): STATUS_CHOICES = ( (\"draft\", \"Draft\"), (\"published\",", "class Choice(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) choice_text = models.CharField(max_length=200) votes = models.IntegerField(default=0)", "models.ForeignKey(get_user_model(), on_delete=models.CASCADE) objects = models.Manager() published = PublishedManager() tags = TaggableManager(through=UUIDTaggedItem) class Meta:", "class Question(models.Model): STATUS_CHOICES = ( (\"draft\", \"Draft\"), (\"published\", \"Published\"), ) id = models.UUIDField(primary_key=True,", "= models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) status = models.CharField(max_length=10, choices=STATUS_CHOICES, default=\"draft\") created_by = models.ForeignKey(get_user_model(),", "def was_published_recently(self): return self.pub_date >= timezone.now() - datetime.timedelta(days=1) def get_update_url(self): return reverse('polls:question_update', args=[self.id])", "return reverse('polls:question_delete', args=[self.id]) def can_update(self, user): return user.is_superuser or self.created_by == user def", "editable=False) choice_text = models.CharField(max_length=200) votes = models.IntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True) question = models.ForeignKey(Question,", "self.question_text def get_absolute_url(self): return reverse('polls:question_detail', args=[self.id]) def was_published_recently(self): return self.pub_date >= timezone.now() -", "return super(PublishedManager, self).get_queryset().filter(status=\"published\") class Question(models.Model): STATUS_CHOICES = ( (\"draft\", \"Draft\"), (\"published\", \"Published\"), )", "PublishedManager() tags = TaggableManager(through=UUIDTaggedItem) class Meta: ordering = ('-pub_date',) def __str__(self): return self.question_text", "was_published_recently(self): return self.pub_date >= timezone.now() - datetime.timedelta(days=1) def get_update_url(self): return reverse('polls:question_update', args=[self.id]) def", "\"Published\"), ) id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) question_text = models.CharField(max_length=200) slug = models.SlugField(max_length=250,", "TaggableManager from taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase): class Meta: verbose_name =", "(\"draft\", \"Draft\"), (\"published\", \"Published\"), ) id = 
models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) question_text = models.CharField(max_length=200)", "on_delete=models.CASCADE) objects = models.Manager() published = PublishedManager() tags = TaggableManager(through=UUIDTaggedItem) class Meta: ordering", "= models.CharField(max_length=200) votes = models.IntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True) question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices')", "= _(\"Tag\") verbose_name_plural = _(\"Tags\") class PublishedManager(models.Manager): def get_queryset(self): return super(PublishedManager, self).get_queryset().filter(status=\"published\") class", "models.Manager() published = PublishedManager() tags = TaggableManager(through=UUIDTaggedItem) class Meta: ordering = ('-pub_date',) def", ">= timezone.now() - datetime.timedelta(days=1) def get_update_url(self): return reverse('polls:question_update', args=[self.id]) def get_delete_url(self): return reverse('polls:question_delete',", "tags = TaggableManager(through=UUIDTaggedItem) class Meta: ordering = ('-pub_date',) def __str__(self): return self.question_text def", "from django.urls import reverse from django.utils import timezone from django.utils.translation import ugettext_lazy as", "models.CharField(max_length=200) slug = models.SlugField(max_length=250, unique_for_date='pub_date') pub_date = models.DateTimeField('date published', default=timezone.now) created_at = models.DateTimeField(auto_now_add=True)", "== user class Choice(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) choice_text = models.CharField(max_length=200) votes", "uuid from django.contrib.auth import get_user_model from django.db import models from django.urls import reverse", "= models.CharField(max_length=200) slug = models.SlugField(max_length=250, unique_for_date='pub_date') pub_date = models.DateTimeField('date published', default=timezone.now) created_at =", "from django.contrib.auth import get_user_model from django.db import models from django.urls import reverse from", "get_user_model from django.db import models from django.urls import reverse from django.utils import timezone", "STATUS_CHOICES = ( (\"draft\", \"Draft\"), (\"published\", \"Published\"), ) id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)", "slug = models.SlugField(max_length=250, unique_for_date='pub_date') pub_date = models.DateTimeField('date published', default=timezone.now) created_at = models.DateTimeField(auto_now_add=True) updated_at", "return user.is_superuser or self.created_by == user def can_delete(self, user): return user.is_superuser or self.created_by", "user.is_superuser or self.created_by == user class Choice(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) choice_text", "models from django.urls import reverse from django.utils import timezone from django.utils.translation import ugettext_lazy", "Meta: verbose_name = _(\"Tag\") verbose_name_plural = _(\"Tags\") class PublishedManager(models.Manager): def get_queryset(self): return super(PublishedManager,", "def get_delete_url(self): return reverse('polls:question_delete', args=[self.id]) def can_update(self, user): return user.is_superuser or self.created_by ==", "('-pub_date',) def __str__(self): return self.question_text def get_absolute_url(self): return reverse('polls:question_detail', args=[self.id]) def was_published_recently(self): return", "return self.pub_date >= 
timezone.now() - datetime.timedelta(days=1) def get_update_url(self): return reverse('polls:question_update', args=[self.id]) def get_delete_url(self):", "= models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices') def __str__(self): return self.choice_text def get_absolute_url(self): return reverse('choice_detail', args=str([self.id]))", "args=[self.id]) def can_update(self, user): return user.is_superuser or self.created_by == user def can_delete(self, user):", "= models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) question_text = models.CharField(max_length=200) slug = models.SlugField(max_length=250, unique_for_date='pub_date') pub_date =", "published = PublishedManager() tags = TaggableManager(through=UUIDTaggedItem) class Meta: ordering = ('-pub_date',) def __str__(self):", "created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) status = models.CharField(max_length=10, choices=STATUS_CHOICES, default=\"draft\") created_by =", "self.created_by == user class Choice(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) choice_text = models.CharField(max_length=200)", "args=[self.id]) def was_published_recently(self): return self.pub_date >= timezone.now() - datetime.timedelta(days=1) def get_update_url(self): return reverse('polls:question_update',", "return self.question_text def get_absolute_url(self): return reverse('polls:question_detail', args=[self.id]) def was_published_recently(self): return self.pub_date >= timezone.now()", "from django.db import models from django.urls import reverse from django.utils import timezone from", "models.DateTimeField('date published', default=timezone.now) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) status = models.CharField(max_length=10, choices=STATUS_CHOICES,", "= models.Manager() published = PublishedManager() tags = TaggableManager(through=UUIDTaggedItem) class Meta: ordering = ('-pub_date',)", "models.SlugField(max_length=250, unique_for_date='pub_date') pub_date = models.DateTimeField('date published', default=timezone.now) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True)", "reverse('polls:question_update', args=[self.id]) def get_delete_url(self): return reverse('polls:question_delete', args=[self.id]) def can_update(self, user): return user.is_superuser or", "= models.IntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True) question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices') def __str__(self): return", "TaggedItemBase class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase): class Meta: verbose_name = _(\"Tag\") verbose_name_plural = _(\"Tags\") class", "= TaggableManager(through=UUIDTaggedItem) class Meta: ordering = ('-pub_date',) def __str__(self): return self.question_text def get_absolute_url(self):", "import GenericUUIDTaggedItemBase, TaggedItemBase class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase): class Meta: verbose_name = _(\"Tag\") verbose_name_plural =", "\"Draft\"), (\"published\", \"Published\"), ) id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) question_text = models.CharField(max_length=200) slug", "status = models.CharField(max_length=10, choices=STATUS_CHOICES, default=\"draft\") created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE) objects = models.Manager() 
published", "_(\"Tag\") verbose_name_plural = _(\"Tags\") class PublishedManager(models.Manager): def get_queryset(self): return super(PublishedManager, self).get_queryset().filter(status=\"published\") class Question(models.Model):", "django.db import models from django.urls import reverse from django.utils import timezone from django.utils.translation", "= models.SlugField(max_length=250, unique_for_date='pub_date') pub_date = models.DateTimeField('date published', default=timezone.now) created_at = models.DateTimeField(auto_now_add=True) updated_at =", "from taggit.managers import TaggableManager from taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase): class", "_ from taggit.managers import TaggableManager from taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase):", "django.utils.translation import ugettext_lazy as _ from taggit.managers import TaggableManager from taggit.models import GenericUUIDTaggedItemBase,", "django.utils import timezone from django.utils.translation import ugettext_lazy as _ from taggit.managers import TaggableManager", "(\"published\", \"Published\"), ) id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) question_text = models.CharField(max_length=200) slug =", "objects = models.Manager() published = PublishedManager() tags = TaggableManager(through=UUIDTaggedItem) class Meta: ordering =" ]
[ "sum of each round of errors. The difference of first and second will", "solve(first_errors, second_errors, third_errors): first_sum = sum(first_errors) second_sum = sum(second_errors) third_sum = sum(third_errors) print", "'<NAME>' ''' https://codeforces.com/problemset/problem/519/B Solution: Calculate the sum of each round of errors. The", "= '<NAME>' ''' https://codeforces.com/problemset/problem/519/B Solution: Calculate the sum of each round of errors.", "second and third will give the error resolved by third round. ''' def", "error resolved by second round. Similarly, the difference of second and third will", "https://codeforces.com/problemset/problem/519/B Solution: Calculate the sum of each round of errors. The difference of", "difference of second and third will give the error resolved by third round.", "of each round of errors. The difference of first and second will give", "of second and third will give the error resolved by third round. '''", "second round. Similarly, the difference of second and third will give the error", "first_sum = sum(first_errors) second_sum = sum(second_errors) third_sum = sum(third_errors) print first_sum - second_sum", "raw_input().split(\" \")) second_errors = map(int, raw_input().split(\" \")) third_errors = map(int, raw_input().split(\" \")) solve(first_errors,", "the sum of each round of errors. The difference of first and second", "difference of first and second will give the error resolved by second round.", "''' https://codeforces.com/problemset/problem/519/B Solution: Calculate the sum of each round of errors. The difference", "give the error resolved by third round. ''' def solve(first_errors, second_errors, third_errors): first_sum", "<filename>problems/B/AAndBAndCompilationErrors.py __author__ = '<NAME>' ''' https://codeforces.com/problemset/problem/519/B Solution: Calculate the sum of each round", "round. Similarly, the difference of second and third will give the error resolved", "second_sum = sum(second_errors) third_sum = sum(third_errors) print first_sum - second_sum print second_sum -", "the error resolved by second round. Similarly, the difference of second and third", "sum(second_errors) third_sum = sum(third_errors) print first_sum - second_sum print second_sum - third_sum if", "error resolved by third round. ''' def solve(first_errors, second_errors, third_errors): first_sum = sum(first_errors)", "__name__ == \"__main__\": raw_input() # ignoring n first_errors = map(int, raw_input().split(\" \")) second_errors", "The difference of first and second will give the error resolved by second", "third_errors): first_sum = sum(first_errors) second_sum = sum(second_errors) third_sum = sum(third_errors) print first_sum -", "sum(first_errors) second_sum = sum(second_errors) third_sum = sum(third_errors) print first_sum - second_sum print second_sum", "each round of errors. The difference of first and second will give the", "first_sum - second_sum print second_sum - third_sum if __name__ == \"__main__\": raw_input() #", "third will give the error resolved by third round. ''' def solve(first_errors, second_errors,", "= map(int, raw_input().split(\" \")) second_errors = map(int, raw_input().split(\" \")) third_errors = map(int, raw_input().split(\"", "second will give the error resolved by second round. 
Similarly, the difference of", "print first_sum - second_sum print second_sum - third_sum if __name__ == \"__main__\": raw_input()", "print second_sum - third_sum if __name__ == \"__main__\": raw_input() # ignoring n first_errors", "# ignoring n first_errors = map(int, raw_input().split(\" \")) second_errors = map(int, raw_input().split(\" \"))", "by third round. ''' def solve(first_errors, second_errors, third_errors): first_sum = sum(first_errors) second_sum =", "if __name__ == \"__main__\": raw_input() # ignoring n first_errors = map(int, raw_input().split(\" \"))", "third_sum if __name__ == \"__main__\": raw_input() # ignoring n first_errors = map(int, raw_input().split(\"", "first_errors = map(int, raw_input().split(\" \")) second_errors = map(int, raw_input().split(\" \")) third_errors = map(int,", "give the error resolved by second round. Similarly, the difference of second and", "\"__main__\": raw_input() # ignoring n first_errors = map(int, raw_input().split(\" \")) second_errors = map(int,", "second_errors, third_errors): first_sum = sum(first_errors) second_sum = sum(second_errors) third_sum = sum(third_errors) print first_sum", "- second_sum print second_sum - third_sum if __name__ == \"__main__\": raw_input() # ignoring", "Solution: Calculate the sum of each round of errors. The difference of first", "of errors. The difference of first and second will give the error resolved", "by second round. Similarly, the difference of second and third will give the", "and third will give the error resolved by third round. ''' def solve(first_errors,", "= sum(second_errors) third_sum = sum(third_errors) print first_sum - second_sum print second_sum - third_sum", "of first and second will give the error resolved by second round. Similarly,", "second_errors = map(int, raw_input().split(\" \")) third_errors = map(int, raw_input().split(\" \")) solve(first_errors, second_errors, third_errors)", "first and second will give the error resolved by second round. Similarly, the", "will give the error resolved by second round. Similarly, the difference of second", "== \"__main__\": raw_input() # ignoring n first_errors = map(int, raw_input().split(\" \")) second_errors =", "third round. ''' def solve(first_errors, second_errors, third_errors): first_sum = sum(first_errors) second_sum = sum(second_errors)", "raw_input() # ignoring n first_errors = map(int, raw_input().split(\" \")) second_errors = map(int, raw_input().split(\"", "Calculate the sum of each round of errors. The difference of first and", "map(int, raw_input().split(\" \")) second_errors = map(int, raw_input().split(\" \")) third_errors = map(int, raw_input().split(\" \"))", "the error resolved by third round. ''' def solve(first_errors, second_errors, third_errors): first_sum =", "= sum(first_errors) second_sum = sum(second_errors) third_sum = sum(third_errors) print first_sum - second_sum print", "the difference of second and third will give the error resolved by third", "resolved by third round. 
''' def solve(first_errors, second_errors, third_errors): first_sum = sum(first_errors) second_sum", "second_sum - third_sum if __name__ == \"__main__\": raw_input() # ignoring n first_errors =", "= sum(third_errors) print first_sum - second_sum print second_sum - third_sum if __name__ ==", "second_sum print second_sum - third_sum if __name__ == \"__main__\": raw_input() # ignoring n", "third_sum = sum(third_errors) print first_sum - second_sum print second_sum - third_sum if __name__", "sum(third_errors) print first_sum - second_sum print second_sum - third_sum if __name__ == \"__main__\":", "def solve(first_errors, second_errors, third_errors): first_sum = sum(first_errors) second_sum = sum(second_errors) third_sum = sum(third_errors)", "errors. The difference of first and second will give the error resolved by", "- third_sum if __name__ == \"__main__\": raw_input() # ignoring n first_errors = map(int,", "''' def solve(first_errors, second_errors, third_errors): first_sum = sum(first_errors) second_sum = sum(second_errors) third_sum =", "__author__ = '<NAME>' ''' https://codeforces.com/problemset/problem/519/B Solution: Calculate the sum of each round of", "round. ''' def solve(first_errors, second_errors, third_errors): first_sum = sum(first_errors) second_sum = sum(second_errors) third_sum", "round of errors. The difference of first and second will give the error", "\")) second_errors = map(int, raw_input().split(\" \")) third_errors = map(int, raw_input().split(\" \")) solve(first_errors, second_errors,", "Similarly, the difference of second and third will give the error resolved by", "ignoring n first_errors = map(int, raw_input().split(\" \")) second_errors = map(int, raw_input().split(\" \")) third_errors", "resolved by second round. Similarly, the difference of second and third will give", "and second will give the error resolved by second round. Similarly, the difference", "n first_errors = map(int, raw_input().split(\" \")) second_errors = map(int, raw_input().split(\" \")) third_errors =", "will give the error resolved by third round. ''' def solve(first_errors, second_errors, third_errors):" ]
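The listing above targets Python 2 (`print` statement, `raw_input`). A minimal Python 3 port of the same sum-difference idea, not verified against the judge:

def solve(first_errors, second_errors, third_errors):
    first_sum = sum(first_errors)
    second_sum = sum(second_errors)
    third_sum = sum(third_errors)
    # Each round drops exactly one error, so consecutive sums
    # differ by exactly that error's value.
    print(first_sum - second_sum)
    print(second_sum - third_sum)


if __name__ == "__main__":
    input()  # ignore n
    first = list(map(int, input().split()))
    second = list(map(int, input().split()))
    third = list(map(int, input().split()))
    solve(first, second, third)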
[ "import urllib2 # 设置代理 # authproxy_handler = urllib2.ProxyHandler({\"http\": \"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler = urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"})", "= urllib2.build_opener(authproxy_handler) # 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener) # 获取请求 url = \"http://www.baidu.com/\" request = urllib2.Request(url)", "urllib2 # 设置代理 # authproxy_handler = urllib2.ProxyHandler({\"http\": \"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler = urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"}) #", "authproxy_handler = urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"}) opener = urllib2.build_opener(authproxy_handler) # 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener) # 获取请求 url", "构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener) # 获取请求 url = \"http://www.baidu.com/\" request = urllib2.Request(url) # 获取响应 response", "= urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"}) # authproxy_handler = urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"}) opener = urllib2.build_opener(authproxy_handler) # 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能", "opener = urllib2.build_opener(authproxy_handler) # 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener) # 获取请求 url = \"http://www.baidu.com/\" request =", "# authproxy_handler = urllib2.ProxyHandler({\"http\": \"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler = urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"}) # authproxy_handler = urllib2.ProxyHandler({\"http\":", "\"172.16.31.10:16816\"}) opener = urllib2.build_opener(authproxy_handler) # 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener) # 获取请求 url = \"http://www.baidu.com/\" request", "设置代理 # authproxy_handler = urllib2.ProxyHandler({\"http\": \"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler = urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"}) # authproxy_handler =", "# 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener) # 获取请求 url = \"http://www.baidu.com/\" request = urllib2.Request(url) # 获取响应", "<filename>study/day02_spider/03_urllib2_authproxyhandler.py # coding=utf-8 import urllib2 # 设置代理 # authproxy_handler = urllib2.ProxyHandler({\"http\": \"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler", "urllib2.ProxyHandler({\"http\": \"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler = urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"}) # authproxy_handler = urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"}) opener =", "= \"http://www.baidu.com/\" request = urllib2.Request(url) # 获取响应 response = opener.open(request) # 获取响应的html信息 html", "request = urllib2.Request(url) # 获取响应 response = opener.open(request) # 获取响应的html信息 html = response.read()", "= urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"}) opener = urllib2.build_opener(authproxy_handler) # 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener) # 获取请求 url =", "= urllib2.ProxyHandler({\"http\": \"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler = urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"}) # authproxy_handler = urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"}) opener", "urllib2.build_opener(authproxy_handler) # 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener) # 
获取请求 url = \"http://www.baidu.com/\" request = urllib2.Request(url) #", "urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"}) opener = urllib2.build_opener(authproxy_handler) # 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener) # 获取请求 url = \"http://www.baidu.com/\"", "authproxy_handler = urllib2.ProxyHandler({\"http\": \"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler = urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"}) # authproxy_handler = urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"})", "\"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler = urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"}) # authproxy_handler = urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"}) opener = urllib2.build_opener(authproxy_handler)", "# 设置代理 # authproxy_handler = urllib2.ProxyHandler({\"http\": \"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler = urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"}) # authproxy_handler", "urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"}) # authproxy_handler = urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"}) opener = urllib2.build_opener(authproxy_handler) # 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener)", "获取请求 url = \"http://www.baidu.com/\" request = urllib2.Request(url) # 获取响应 response = opener.open(request) #", "\"http://www.baidu.com/\" request = urllib2.Request(url) # 获取响应 response = opener.open(request) # 获取响应的html信息 html =", "# 获取请求 url = \"http://www.baidu.com/\" request = urllib2.Request(url) # 获取响应 response = opener.open(request)", "# authproxy_handler = urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"}) opener = urllib2.build_opener(authproxy_handler) # 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener) # 获取请求", "= urllib2.Request(url) # 获取响应 response = opener.open(request) # 获取响应的html信息 html = response.read() print(html)", "urllib2.install_opener(opener) # 获取请求 url = \"http://www.baidu.com/\" request = urllib2.Request(url) # 获取响应 response =", "authproxy_handler = urllib2.ProxyHandler({\"http\": \"192.168.3.11:9000\"}) # authproxy_handler = urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"}) opener = urllib2.build_opener(authproxy_handler) #", "url = \"http://www.baidu.com/\" request = urllib2.Request(url) # 获取响应 response = opener.open(request) # 获取响应的html信息", "\"192.168.3.11:9000\"}) # authproxy_handler = urllib2.ProxyHandler({\"http\": \"172.16.31.10:16816\"}) opener = urllib2.build_opener(authproxy_handler) # 构建了一个全局的opener,之后所有的请求都可以用urlopen()方式去发送,也附带Handler的功能 urllib2.install_opener(opener) #", "# coding=utf-8 import urllib2 # 设置代理 # authproxy_handler = urllib2.ProxyHandler({\"http\": \"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler =", "coding=utf-8 import urllib2 # 设置代理 # authproxy_handler = urllib2.ProxyHandler({\"http\": \"mr_mao_hacker:sffqry9r@172.16.31.10:16816\"}) authproxy_handler = urllib2.ProxyHandler({\"http\":" ]
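`urllib2` exists only in Python 2; in Python 3 the same handler classes live in `urllib.request`. A minimal sketch of the equivalent setup, assuming the same proxy address:

# Python 3 equivalent: ProxyHandler/build_opener moved to urllib.request.
import urllib.request

proxy_handler = urllib.request.ProxyHandler({"http": "192.168.3.11:9000"})
opener = urllib.request.build_opener(proxy_handler)
urllib.request.install_opener(opener)  # urlopen() now routes through the proxy

with opener.open("http://www.baidu.com/") as response:
    html = response.read()
print(html[:200])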
[ "string using itertools. # # https://www.hackerrank.com/challenges/itertools-combinations/problem # from itertools import combinations s, n", "Itertools > itertools.combinations() # Print all the combinations of a string using itertools.", "> Itertools > itertools.combinations() # Print all the combinations of a string using", "input().split() for k in range(1, int(n) + 1): for i in combinations(sorted(s), k):", "combinations of a string using itertools. # # https://www.hackerrank.com/challenges/itertools-combinations/problem # from itertools import", "# Python > Itertools > itertools.combinations() # Print all the combinations of a", "n = input().split() for k in range(1, int(n) + 1): for i in", "# https://www.hackerrank.com/challenges/itertools-combinations/problem # from itertools import combinations s, n = input().split() for k", "https://www.hackerrank.com/challenges/itertools-combinations/problem # from itertools import combinations s, n = input().split() for k in", "combinations s, n = input().split() for k in range(1, int(n) + 1): for", "for k in range(1, int(n) + 1): for i in combinations(sorted(s), k): print(\"\".join(i))", "# from itertools import combinations s, n = input().split() for k in range(1,", "itertools.combinations() # Print all the combinations of a string using itertools. # #", "> itertools.combinations() # Print all the combinations of a string using itertools. #", "a string using itertools. # # https://www.hackerrank.com/challenges/itertools-combinations/problem # from itertools import combinations s,", "from itertools import combinations s, n = input().split() for k in range(1, int(n)", "the combinations of a string using itertools. # # https://www.hackerrank.com/challenges/itertools-combinations/problem # from itertools", "all the combinations of a string using itertools. # # https://www.hackerrank.com/challenges/itertools-combinations/problem # from", "import combinations s, n = input().split() for k in range(1, int(n) + 1):", "of a string using itertools. # # https://www.hackerrank.com/challenges/itertools-combinations/problem # from itertools import combinations", "itertools. # # https://www.hackerrank.com/challenges/itertools-combinations/problem # from itertools import combinations s, n = input().split()", "itertools import combinations s, n = input().split() for k in range(1, int(n) +", "s, n = input().split() for k in range(1, int(n) + 1): for i", "Print all the combinations of a string using itertools. # # https://www.hackerrank.com/challenges/itertools-combinations/problem #", "# Print all the combinations of a string using itertools. # # https://www.hackerrank.com/challenges/itertools-combinations/problem", "<reponame>PingHuskar/hackerrank<filename>python/py-itertools/itertools-combinations.py # Python > Itertools > itertools.combinations() # Print all the combinations of", "using itertools. # # https://www.hackerrank.com/challenges/itertools-combinations/problem # from itertools import combinations s, n =", "Python > Itertools > itertools.combinations() # Print all the combinations of a string", "# # https://www.hackerrank.com/challenges/itertools-combinations/problem # from itertools import combinations s, n = input().split() for", "= input().split() for k in range(1, int(n) + 1): for i in combinations(sorted(s)," ]
[ "md5 import os import sys # The following 4 lines are necessary until", "'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'} def fingerprint_line(line):", "= [ \"name\", \"country_registered\", \"country_owner\", \"owner_operator\", \"users\", \"purpose\", \"purpose_detailed\", \"orbit_class\", \"orbit_type\", \"GEO_longitude\", \"perigee_km\",", "line[19] if not multiple_name_flag: multiple_name_flag = 0 else: multiple_name_flag = 1 payload_flag =", "browser, to get past UCS 403 errors http_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT", "Safari/537.36'} def fingerprint_line(line): \"\"\" Creates a unique signature from a line.\"\"\" return md5(line.encode(\"utf-8\")).hexdigest()", "a unique signature from a line.\"\"\" return md5(line.encode(\"utf-8\")).hexdigest() def load_ucs_satdb_data(): log.info(\"Fetching UCSATDB data", "satcat_row.loc[\"apogee\"] satdb.loc[i, \"inclination_degrees\"] = satcat_row.loc[\"inclination_deg\"] satdb.loc[i, \"period_minutes\"] = satcat_row.loc[\"orbit_period_minutes\"] satdb.loc[i, \"launch_date\"] = satcat_row.loc[\"launch_date\"]", "\"radar_crosssec\", \"orbit_status_code\", ], ) df.set_index(\"norad_num\") return df def fix_discrepencies(satdb, satcat): log.info(\"Fixing discrepencies in", "the UCS data...\") # discrepencies_url = \"http://celestrak.com/pub/UCS-SD-Discrepancies.txt\" for i, satdb_row in satdb.iterrows(): norad_number", "row] + [record_fingerprint] data_batch.append(savable) if len(data_batch) > 0: db.add_celestrak_satcat_batch(data_batch) # make it print", "payload_flag, ops_status_code, name, source, launch_date, launch_site, decay_date, orbit_period_minutes, inclination_deg, apogee, perigee, radar_crosssec, orbit_status_code,", "= load_celestrak_satcat_data() update_ucs_satdb_raw_table(db, satdb) update_celestrak_satcat_table(db, satcat) satdb = fix_discrepencies(satdb, satcat) update_ucs_satdb_table(db, satdb) log.info(\"Script", "\"GEO_longitude\", \"perigee_km\", \"apogee_km\", \"eccentricity\", \"inclination_degrees\", \"period_minutes\", \"launch_mass_kg\", \"dry_mass_kg\", \"power_watts\", \"launch_date\", \"expected_lifetime_years\", \"contractor\", \"contractor_country\",", "satdb_url = \"https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt\" # https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error s=requests.get(satdb_url, headers= http_headers).text satdb=pd.read_csv(StringIO(s), sep=\";\", delimiter=\"\\t\", encoding=\"Windows-1252\") #", "loading into memory...\") satcat_url = \"https://www.celestrak.com/pub/satcat.txt\" satcat = pd.read_csv( satcat_url, engine=\"python\", delimiter=r\"\\n\", encoding=\"Windows-1252\"", "satdb.loc[i, \"international_designator\"] = satcat_row.loc[\"intl_desg\"] import random if random.randint(1, 101) < 3: satdb.loc[i, \"name\"]", "satdb def load_celestrak_satcat_data(): log.info(\"Fetching CELESTRAK SATCAT data and loading into memory...\") satcat_url =", "1 if len(data_batch) > 0: db.add_ucs_satdb_batch(data_batch) def parse_celestrak_row(line): intl_desg = line[0:11] norad_number =", "apogee, perigee, radar_crosssec, orbit_status_code, ) return satcat_tuple def update_celestrak_satcat_table(Database, df): log.info(\"Updating the celestrak_satcat", "datetime import datetime import logging log = logging.getLogger(__name__) CONFIG = os.path.abspath(\"../../trusat-config.yaml\") # Use", "payload_flag: payload_flag = 0 else: 
payload_flag = 1 ops_status_code = line[21] name =", "payload_flag = 0 else: payload_flag = 1 ops_status_code = line[21] name = line[23:47]", "the celestrak_satcat table...\") data_batch = [] for row in df.itertuples(index=False, name=None): record_fingerprint =", "= \"https://s3.amazonaws.com/ucs-documents/nuclear-weapons/sat-database/5-9-19-update/UCS_Satellite_Database_4-1-2019.txt\" satdb_url = \"https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt\" # https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error s=requests.get(satdb_url, headers= http_headers).text satdb=pd.read_csv(StringIO(s), sep=\";\", delimiter=\"\\t\",", "the console. console = logging.StreamHandler() log.addHandler(console) log.setLevel(logging.DEBUG) db = database.Database(CONFIG) db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table() db.create_ucs_satdb_table()", "\"https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt\" # https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error s=requests.get(satdb_url, headers= http_headers).text satdb=pd.read_csv(StringIO(s), sep=\";\", delimiter=\"\\t\", encoding=\"Windows-1252\") # satdb =", "0 else: multiple_name_flag = 1 payload_flag = line[20] if not payload_flag: payload_flag =", "\"https://s3.amazonaws.com/ucs-documents/nuclear-weapons/sat-database/5-9-19-update/UCS_Satellite_Database_4-1-2019.txt\" satdb_url = \"https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt\" # https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error s=requests.get(satdb_url, headers= http_headers).text satdb=pd.read_csv(StringIO(s), sep=\";\", delimiter=\"\\t\", encoding=\"Windows-1252\")", "found in the Celestrak Catalog. Relying on SatDB data only.\"\"\" ) return satdb", "[record_fingerprint] data_batch.append(savable) total_rows = total_rows + 1 if len(data_batch) > 0: db.add_ucs_satdb_raw_batch(data_batch) def", "try: return datetime.strptime(val, \"%m/%d/%y\").date() except: pass try: return datetime.strptime(val, \"%m/%d/%Y\").date() except: pass try:", "= total_rows + 1 if len(data_batch) > 0: db.add_ucs_satdb_raw_batch(data_batch) def update_ucs_satdb_table(Database, df): log.info(\"Updating", "ucs_satdb table...\") total_rows = 0 data_batch = [] for row in df.itertuples(index=False, name=None):", "db.add_ucs_satdb_batch(data_batch) def parse_celestrak_row(line): intl_desg = line[0:11] norad_number = line[13:18] multiple_name_flag = line[19] if", "make it print to the console. 
console = logging.StreamHandler() log.addHandler(console) log.setLevel(logging.DEBUG) db =", "multiple_name_flag = 1 payload_flag = line[20] if not payload_flag: payload_flag = 0 else:", "perigee, radar_crosssec, orbit_status_code, ) return satcat_tuple def update_celestrak_satcat_table(Database, df): log.info(\"Updating the celestrak_satcat table...\")", "try: return float(val.replace(\",\", \"\")) except: pass try: return datetime.strptime(val, \"%m/%d/%y\").date() except: pass try:", "satdb_row in satdb.iterrows(): norad_number = format(satdb_row.loc[\"norad_number\"]) try: satcat_row = satcat.loc[norad_number] satdb.loc[i, \"name\"] =", "float: return val val = val.strip() try: return int(val.replace(\",\", \"\")) except: pass try:", "len(data_batch) > 0: db.add_ucs_satdb_raw_batch(data_batch) def update_ucs_satdb_table(Database, df): log.info(\"Updating the (corrected) ucs_satdb table...\") total_rows", "# https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error s=requests.get(satdb_url, headers= http_headers).text satdb=pd.read_csv(StringIO(s), sep=\";\", delimiter=\"\\t\", encoding=\"Windows-1252\") # satdb = pd.read_csv(satdb_url,", "try: return datetime.strptime(val, \"%m/%d/%Y\").date() except: pass try: return datetime.strptime(val, \"%Y/%m/%d\").date() except: pass if", "return satdb def format(val): if pd.isna(val): return None if type(val).__module__ == \"numpy\": val", "0: db.add_celestrak_satcat_batch(data_batch) # make it print to the console. console = logging.StreamHandler() log.addHandler(console)", "i in row] + [record_fingerprint] data_batch.append(savable) total_rows = total_rows + 1 if len(data_batch)", "line[75:85] orbit_period_minutes = line[87:94] inclination_deg = line[96:101] apogee = line[103:109] perigee = line[111:117]", "for i in row] + [record_fingerprint] data_batch.append(savable) if len(data_batch) > 0: db.add_celestrak_satcat_batch(data_batch) #", "total_rows = total_rows + 1 if len(data_batch) > 0: db.add_ucs_satdb_raw_batch(data_batch) def update_ucs_satdb_table(Database, df):", "launch_date, launch_site, decay_date, orbit_period_minutes, inclination_deg, apogee, perigee, radar_crosssec, orbit_status_code, ) return satcat_tuple def", "df): log.info(\"Updating the celestrak_satcat table...\") data_batch = [] for row in df.itertuples(index=False, name=None):", "= [] for row in df.itertuples(index=False, name=None): record_fingerprint = fingerprint_line(\"\".join(str(e) for e in", "and loading into memory...\") # satdb_url = \"https://s3.amazonaws.com/ucs-documents/nuclear-weapons/sat-database/5-9-19-update/UCS_Satellite_Database_4-1-2019.txt\" satdb_url = \"https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt\" # https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error", "row)) savable = [format(i) for i in row] + [record_fingerprint] data_batch.append(savable) total_rows =", "row] + [record_fingerprint] data_batch.append(savable) total_rows = total_rows + 1 if len(data_batch) > 0:", "# make it print to the console. 
console = logging.StreamHandler() log.addHandler(console) log.setLevel(logging.DEBUG) db", "os.path.abspath(\"../../trusat-config.yaml\") # Use this as our browser, to get past UCS 403 errors", "def update_ucs_satdb_raw_table(Database, df): log.info(\"Updating the ucs_satdb_raw table...\") total_rows = 0 data_batch = []", "= 0 data_batch = [] for row in df.itertuples(index=False, name=None): record_fingerprint = fingerprint_line(\"\".join(str(e)", "def update_ucs_satdb_table(Database, df): log.info(\"Updating the (corrected) ucs_satdb table...\") total_rows = 0 data_batch =", "0: db.add_ucs_satdb_raw_batch(data_batch) def update_ucs_satdb_table(Database, df): log.info(\"Updating the (corrected) ucs_satdb table...\") total_rows = 0", "= 1 ops_status_code = line[21] name = line[23:47] source = line[49:54] launch_date =", "3: satdb.loc[i, \"name\"] = \"BLAH BLAH BLAH\" except (KeyError, ValueError): log.warning( f\"\"\"Satellite with", "satcat_tuple = ( intl_desg, norad_number, multiple_name_flag, payload_flag, ops_status_code, name, source, launch_date, launch_site, decay_date,", "if pd.isna(val): return None if type(val).__module__ == \"numpy\": val = val.item() if type(val)", "df def fix_discrepencies(satdb, satcat): log.info(\"Fixing discrepencies in the UCS data...\") # discrepencies_url =", "\"launch_site\"] = satcat_row.loc[\"launch_site\"] satdb.loc[i, \"international_designator\"] = satcat_row.loc[\"intl_desg\"] import random if random.randint(1, 101) <", "db = database.Database(CONFIG) db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table() db.create_ucs_satdb_table() satdb = load_ucs_satdb_data() satcat = load_celestrak_satcat_data() update_ucs_satdb_raw_table(db,", "data and loading into memory...\") satcat_url = \"https://www.celestrak.com/pub/satcat.txt\" satcat = pd.read_csv( satcat_url, engine=\"python\",", "currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir)) import database from io import StringIO import pandas as", "\"period_minutes\", \"launch_mass_kg\", \"dry_mass_kg\", \"power_watts\", \"launch_date\", \"expected_lifetime_years\", \"contractor\", \"contractor_country\", \"launch_site\", \"launch_vehicle\", \"international_designator\", \"norad_number\", \"comments\",", "f\"\"\"Satellite with norad number {norad_number} in satdb is not found in the Celestrak", "= 0 else: multiple_name_flag = 1 payload_flag = line[20] if not payload_flag: payload_flag", "else: payload_flag = 1 ops_status_code = line[21] name = line[23:47] source = line[49:54]", "row = [format(q) for q in parse_celestrak_row(row[0])] data.append(row) df = pd.DataFrame( data, columns=[", "\"users\", \"purpose\", \"purpose_detailed\", \"orbit_class\", \"orbit_type\", \"GEO_longitude\", \"perigee_km\", \"apogee_km\", \"eccentricity\", \"inclination_degrees\", \"period_minutes\", \"launch_mass_kg\", \"dry_mass_kg\",", "\"norad_num\", \"multiple_name_flag\", \"payload_flag\", \"ops_status_code\", \"name\", \"source\", \"launch_date\", \"launch_site\", \"decay_date\", \"orbit_period_minutes\", \"inclination_deg\", \"apogee\", \"perigee\",", "len(data_batch) > 0: db.add_ucs_satdb_batch(data_batch) def parse_celestrak_row(line): intl_desg = line[0:11] norad_number = line[13:18] multiple_name_flag", "line.\"\"\" return md5(line.encode(\"utf-8\")).hexdigest() def load_ucs_satdb_data(): log.info(\"Fetching UCSATDB data and loading into memory...\") #", "in satdb.iterrows(): norad_number = format(satdb_row.loc[\"norad_number\"]) 
try: satcat_row = satcat.loc[norad_number] satdb.loc[i, \"name\"] = satcat_row.loc[\"name\"]", "sys # The following 4 lines are necessary until our modules are public", ") return satdb def format(val): if pd.isna(val): return None if type(val).__module__ == \"numpy\":", "q in parse_celestrak_row(row[0])] data.append(row) df = pd.DataFrame( data, columns=[ \"intl_desg\", \"norad_num\", \"multiple_name_flag\", \"payload_flag\",", "0 data_batch = [] for row in df.itertuples(index=False, name=None): record_fingerprint = fingerprint_line(\"\".join(str(e) for", "WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'} def fingerprint_line(line): \"\"\" Creates a unique", "(Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'} def fingerprint_line(line): \"\"\"", "pass try: return datetime.strptime(val, \"%m/%d/%Y\").date() except: pass try: return datetime.strptime(val, \"%Y/%m/%d\").date() except: pass", "= [] for row in satcat.itertuples(index=False, name=None): row = [format(q) for q in", "return satcat_tuple def update_celestrak_satcat_table(Database, df): log.info(\"Updating the celestrak_satcat table...\") data_batch = [] for", "\"purpose_detailed\", \"orbit_class\", \"orbit_type\", \"GEO_longitude\", \"perigee_km\", \"apogee_km\", \"eccentricity\", \"inclination_degrees\", \"period_minutes\", \"launch_mass_kg\", \"dry_mass_kg\", \"power_watts\", \"launch_date\",", "data_batch.append(savable) total_rows = total_rows + 1 if len(data_batch) > 0: db.add_ucs_satdb_raw_batch(data_batch) def update_ucs_satdb_table(Database,", "= line[96:101] apogee = line[103:109] perigee = line[111:117] radar_crosssec = line[119:127] orbit_status_code =", "= line[129:132] satcat_tuple = ( intl_desg, norad_number, multiple_name_flag, payload_flag, ops_status_code, name, source, launch_date,", "[record_fingerprint] data_batch.append(savable) if len(data_batch) > 0: db.add_celestrak_satcat_batch(data_batch) # make it print to the", "= load_ucs_satdb_data() satcat = load_celestrak_satcat_data() update_ucs_satdb_raw_table(db, satdb) update_celestrak_satcat_table(db, satcat) satdb = fix_discrepencies(satdb, satcat)", "\"name\"] = satcat_row.loc[\"name\"] satdb.loc[i, \"perigee_km\"] = satcat_row.loc[\"perigee\"] satdb.loc[i, \"apogee_km\"] = satcat_row.loc[\"apogee\"] satdb.loc[i, \"inclination_degrees\"]", "satcat_row.loc[\"launch_site\"] satdb.loc[i, \"international_designator\"] = satcat_row.loc[\"intl_desg\"] import random if random.randint(1, 101) < 3: satdb.loc[i,", "update_celestrak_satcat_table(Database, df): log.info(\"Updating the celestrak_satcat table...\") data_batch = [] for row in df.itertuples(index=False,", "get past UCS 403 errors http_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)", "requests from datetime import datetime import logging log = logging.getLogger(__name__) CONFIG = os.path.abspath(\"../../trusat-config.yaml\")", "fingerprint_line(\"\".join(str(e) for e in row)) savable = [format(i) for i in row] +", "io import StringIO import pandas as pd import requests from datetime import datetime", "http_headers).text satdb=pd.read_csv(StringIO(s), sep=\";\", delimiter=\"\\t\", encoding=\"Windows-1252\") # satdb = pd.read_csv(satdb_url, delimiter=\"\\t\", encoding=\"Windows-1252\") satdb =", "val def update_ucs_satdb_raw_table(Database, df): log.info(\"Updating the ucs_satdb_raw table...\") total_rows = 0 data_batch =", "[format(q) for q in parse_celestrak_row(row[0])] data.append(row) df = pd.DataFrame( data, 
columns=[ \"intl_desg\", \"norad_num\",", "satcat_tuple def update_celestrak_satcat_table(Database, df): log.info(\"Updating the celestrak_satcat table...\") data_batch = [] for row", "line[20] if not payload_flag: payload_flag = 0 else: payload_flag = 1 ops_status_code =", "total_rows + 1 if len(data_batch) > 0: db.add_ucs_satdb_raw_batch(data_batch) def update_ucs_satdb_table(Database, df): log.info(\"Updating the", "+ [record_fingerprint] data_batch.append(savable) total_rows = total_rows + 1 if len(data_batch) > 0: db.add_ucs_satdb_raw_batch(data_batch)", "df): log.info(\"Updating the (corrected) ucs_satdb table...\") total_rows = 0 data_batch = [] for", "line[23:47] source = line[49:54] launch_date = line[56:66] launch_site = line[69:73] decay_date = line[75:85]", "SatDB data only.\"\"\" ) return satdb def format(val): if pd.isna(val): return None if", "\"international_designator\"] = satcat_row.loc[\"intl_desg\"] import random if random.randint(1, 101) < 3: satdb.loc[i, \"name\"] =", "radar_crosssec, orbit_status_code, ) return satcat_tuple def update_celestrak_satcat_table(Database, df): log.info(\"Updating the celestrak_satcat table...\") data_batch", "def fingerprint_line(line): \"\"\" Creates a unique signature from a line.\"\"\" return md5(line.encode(\"utf-8\")).hexdigest() def", "{norad_number} in satdb is not found in the Celestrak Catalog. Relying on SatDB", "int(val.replace(\",\", \"\")) except: pass try: return float(val.replace(\",\", \"\")) except: pass try: return datetime.strptime(val,", "\"orbit_class\", \"orbit_type\", \"GEO_longitude\", \"perigee_km\", \"apogee_km\", \"eccentricity\", \"inclination_degrees\", \"period_minutes\", \"launch_mass_kg\", \"dry_mass_kg\", \"power_watts\", \"launch_date\", \"expected_lifetime_years\",", "line[96:101] apogee = line[103:109] perigee = line[111:117] radar_crosssec = line[119:127] orbit_status_code = line[129:132]", "load_celestrak_satcat_data() update_ucs_satdb_raw_table(db, satdb) update_celestrak_satcat_table(db, satcat) satdb = fix_discrepencies(satdb, satcat) update_ucs_satdb_table(db, satdb) log.info(\"Script Complete\")", "for i, satdb_row in satdb.iterrows(): norad_number = format(satdb_row.loc[\"norad_number\"]) try: satcat_row = satcat.loc[norad_number] satdb.loc[i,", "memory...\") satcat_url = \"https://www.celestrak.com/pub/satcat.txt\" satcat = pd.read_csv( satcat_url, engine=\"python\", delimiter=r\"\\n\", encoding=\"Windows-1252\" ) data", "or type(val) is float: return val val = val.strip() try: return int(val.replace(\",\", \"\"))", "launch_date = line[56:66] launch_site = line[69:73] decay_date = line[75:85] orbit_period_minutes = line[87:94] inclination_deg", "logging.StreamHandler() log.addHandler(console) log.setLevel(logging.DEBUG) db = database.Database(CONFIG) db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table() db.create_ucs_satdb_table() satdb = load_ucs_satdb_data() satcat", "database from io import StringIO import pandas as pd import requests from datetime", "satdb.loc[i, \"launch_date\"] = satcat_row.loc[\"launch_date\"] satdb.loc[i, \"launch_site\"] = satcat_row.loc[\"launch_site\"] satdb.loc[i, \"international_designator\"] = satcat_row.loc[\"intl_desg\"] import", "data...\") # discrepencies_url = \"http://celestrak.com/pub/UCS-SD-Discrepancies.txt\" for i, satdb_row in satdb.iterrows(): norad_number = format(satdb_row.loc[\"norad_number\"])", "\"N/A\": return None return val def update_ucs_satdb_raw_table(Database, df): log.info(\"Updating the ucs_satdb_raw table...\") total_rows", 
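# Aside: the 403 workaround above is a reusable pattern, not specific to the
# UCS feed: fetch with a browser-like User-Agent via requests, then hand the
# body to pandas through StringIO. A minimal sketch (the URL below is a
# placeholder, not one of this script's sources):
#
#   resp = requests.get("https://example.com/data.tsv",
#                       headers={"User-Agent": "Mozilla/5.0"})
#   frame = pd.read_csv(StringIO(resp.text), delimiter="\t")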
"norad_number = line[13:18] multiple_name_flag = line[19] if not multiple_name_flag: multiple_name_flag = 0 else:", "= \"https://www.celestrak.com/pub/satcat.txt\" satcat = pd.read_csv( satcat_url, engine=\"python\", delimiter=r\"\\n\", encoding=\"Windows-1252\" ) data = []", "\"eccentricity\", \"inclination_degrees\", \"period_minutes\", \"launch_mass_kg\", \"dry_mass_kg\", \"power_watts\", \"launch_date\", \"expected_lifetime_years\", \"contractor\", \"contractor_country\", \"launch_site\", \"launch_vehicle\", \"international_designator\",", "= line[21] name = line[23:47] source = line[49:54] launch_date = line[56:66] launch_site =", "# The following 4 lines are necessary until our modules are public import", "in satdb is not found in the Celestrak Catalog. Relying on SatDB data", ") df.set_index(\"norad_num\") return df def fix_discrepencies(satdb, satcat): log.info(\"Fixing discrepencies in the UCS data...\")", "= line[75:85] orbit_period_minutes = line[87:94] inclination_deg = line[96:101] apogee = line[103:109] perigee =", "\"launch_site\", \"decay_date\", \"orbit_period_minutes\", \"inclination_deg\", \"apogee\", \"perigee\", \"radar_crosssec\", \"orbit_status_code\", ], ) df.set_index(\"norad_num\") return df", "from a line.\"\"\" return md5(line.encode(\"utf-8\")).hexdigest() def load_ucs_satdb_data(): log.info(\"Fetching UCSATDB data and loading into", "\"apogee_km\"] = satcat_row.loc[\"apogee\"] satdb.loc[i, \"inclination_degrees\"] = satcat_row.loc[\"inclination_deg\"] satdb.loc[i, \"period_minutes\"] = satcat_row.loc[\"orbit_period_minutes\"] satdb.loc[i, \"launch_date\"]", "= total_rows + 1 if len(data_batch) > 0: db.add_ucs_satdb_batch(data_batch) def parse_celestrak_row(line): intl_desg =", "line[56:66] launch_site = line[69:73] decay_date = line[75:85] orbit_period_minutes = line[87:94] inclination_deg = line[96:101]", "UCS 403 errors http_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML,", "\"%Y/%m/%d\").date() except: pass if not val or val == \"N/A\": return None return", "return float(val.replace(\",\", \"\")) except: pass try: return datetime.strptime(val, \"%m/%d/%y\").date() except: pass try: return", "i, satdb_row in satdb.iterrows(): norad_number = format(satdb_row.loc[\"norad_number\"]) try: satcat_row = satcat.loc[norad_number] satdb.loc[i, \"name\"]", "\"source_5\", \"source_6\", \"source_7\", ] return satdb def load_celestrak_satcat_data(): log.info(\"Fetching CELESTRAK SATCAT data and", "Chrome/56.0.2924.76 Safari/537.36'} def fingerprint_line(line): \"\"\" Creates a unique signature from a line.\"\"\" return", "for i in row] + [record_fingerprint] data_batch.append(savable) total_rows = total_rows + 1 if", "\"launch_date\"] = satcat_row.loc[\"launch_date\"] satdb.loc[i, \"launch_site\"] = satcat_row.loc[\"launch_site\"] satdb.loc[i, \"international_designator\"] = satcat_row.loc[\"intl_desg\"] import random", "= line[111:117] radar_crosssec = line[119:127] orbit_status_code = line[129:132] satcat_tuple = ( intl_desg, norad_number,", "log.warning( f\"\"\"Satellite with norad number {norad_number} in satdb is not found in the", "datetime.strptime(val, \"%m/%d/%y\").date() except: pass try: return datetime.strptime(val, \"%m/%d/%Y\").date() except: pass try: return datetime.strptime(val,", "\"apogee_km\", \"eccentricity\", \"inclination_degrees\", \"period_minutes\", \"launch_mass_kg\", \"dry_mass_kg\", \"power_watts\", \"launch_date\", \"expected_lifetime_years\", \"contractor\", \"contractor_country\", \"launch_site\", 
\"launch_vehicle\",", "satdb=pd.read_csv(StringIO(s), sep=\";\", delimiter=\"\\t\", encoding=\"Windows-1252\") # satdb = pd.read_csv(satdb_url, delimiter=\"\\t\", encoding=\"Windows-1252\") satdb = satdb.iloc[:,", "if len(data_batch) > 0: db.add_ucs_satdb_batch(data_batch) def parse_celestrak_row(line): intl_desg = line[0:11] norad_number = line[13:18]", "int or type(val) is float: return val val = val.strip() try: return int(val.replace(\",\",", "SATCAT data and loading into memory...\") satcat_url = \"https://www.celestrak.com/pub/satcat.txt\" satcat = pd.read_csv( satcat_url,", "inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir)) import database from io import StringIO import pandas", "df): log.info(\"Updating the ucs_satdb_raw table...\") total_rows = 0 data_batch = [] for row", "in row] + [record_fingerprint] data_batch.append(savable) total_rows = total_rows + 1 if len(data_batch) >", "https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error s=requests.get(satdb_url, headers= http_headers).text satdb=pd.read_csv(StringIO(s), sep=\";\", delimiter=\"\\t\", encoding=\"Windows-1252\") # satdb = pd.read_csv(satdb_url, delimiter=\"\\t\",", "\"purpose\", \"purpose_detailed\", \"orbit_class\", \"orbit_type\", \"GEO_longitude\", \"perigee_km\", \"apogee_km\", \"eccentricity\", \"inclination_degrees\", \"period_minutes\", \"launch_mass_kg\", \"dry_mass_kg\", \"power_watts\",", "total_rows = total_rows + 1 if len(data_batch) > 0: db.add_ucs_satdb_batch(data_batch) def parse_celestrak_row(line): intl_desg", "[] for row in satcat.itertuples(index=False, name=None): row = [format(q) for q in parse_celestrak_row(row[0])]", "\"comments\", \"detailed_comments\", \"source_1\", \"source_2\", \"source_3\", \"source_4\", \"source_5\", \"source_6\", \"source_7\", ] return satdb def", "not val or val == \"N/A\": return None return val def update_ucs_satdb_raw_table(Database, df):", "# satdb_url = \"https://s3.amazonaws.com/ucs-documents/nuclear-weapons/sat-database/5-9-19-update/UCS_Satellite_Database_4-1-2019.txt\" satdb_url = \"https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt\" # https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error s=requests.get(satdb_url, headers= http_headers).text satdb=pd.read_csv(StringIO(s),", "name=None): row = [format(q) for q in parse_celestrak_row(row[0])] data.append(row) df = pd.DataFrame( data,", "satcat_row.loc[\"inclination_deg\"] satdb.loc[i, \"period_minutes\"] = satcat_row.loc[\"orbit_period_minutes\"] satdb.loc[i, \"launch_date\"] = satcat_row.loc[\"launch_date\"] satdb.loc[i, \"launch_site\"] = satcat_row.loc[\"launch_site\"]", "lines are necessary until our modules are public import inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))", "in df.itertuples(index=False, name=None): record_fingerprint = fingerprint_line(\"\".join(str(e) for e in row)) savable = [format(i)", "in row)) savable = [format(i) for i in row] + [record_fingerprint] data_batch.append(savable) total_rows", "= [format(q) for q in parse_celestrak_row(row[0])] data.append(row) df = pd.DataFrame( data, columns=[ \"intl_desg\",", "if len(data_batch) > 0: db.add_celestrak_satcat_batch(data_batch) # make it print to the console. 
console", "def format(val): if pd.isna(val): return None if type(val).__module__ == \"numpy\": val = val.item()", "data and loading into memory...\") # satdb_url = \"https://s3.amazonaws.com/ucs-documents/nuclear-weapons/sat-database/5-9-19-update/UCS_Satellite_Database_4-1-2019.txt\" satdb_url = \"https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt\" #", "(KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'} def fingerprint_line(line): \"\"\" Creates a unique signature from", "= 0 else: payload_flag = 1 ops_status_code = line[21] name = line[23:47] source", "our modules are public import inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir)) import database from", "pd.DataFrame( data, columns=[ \"intl_desg\", \"norad_num\", \"multiple_name_flag\", \"payload_flag\", \"ops_status_code\", \"name\", \"source\", \"launch_date\", \"launch_site\", \"decay_date\",", "encoding=\"Windows-1252\") # satdb = pd.read_csv(satdb_url, delimiter=\"\\t\", encoding=\"Windows-1252\") satdb = satdb.iloc[:, :35] satdb.applymap(format) satdb.columns", "\"source_1\", \"source_2\", \"source_3\", \"source_4\", \"source_5\", \"source_6\", \"source_7\", ] return satdb def load_celestrak_satcat_data(): log.info(\"Fetching", "\"name\"] = \"BLAH BLAH BLAH\" except (KeyError, ValueError): log.warning( f\"\"\"Satellite with norad number", "our browser, to get past UCS 403 errors http_headers = {'User-Agent': 'Mozilla/5.0 (Windows", "source, launch_date, launch_site, decay_date, orbit_period_minutes, inclination_deg, apogee, perigee, radar_crosssec, orbit_status_code, ) return satcat_tuple", "ops_status_code = line[21] name = line[23:47] source = line[49:54] launch_date = line[56:66] launch_site", "line[21] name = line[23:47] source = line[49:54] launch_date = line[56:66] launch_site = line[69:73]", "\"name\", \"source\", \"launch_date\", \"launch_site\", \"decay_date\", \"orbit_period_minutes\", \"inclination_deg\", \"apogee\", \"perigee\", \"radar_crosssec\", \"orbit_status_code\", ], )", "the Celestrak Catalog. Relying on SatDB data only.\"\"\" ) return satdb def format(val):", "in the Celestrak Catalog. 
Relying on SatDB data only.\"\"\" ) return satdb def", "\"source_7\", ] return satdb def load_celestrak_satcat_data(): log.info(\"Fetching CELESTRAK SATCAT data and loading into", "decay_date, orbit_period_minutes, inclination_deg, apogee, perigee, radar_crosssec, orbit_status_code, ) return satcat_tuple def update_celestrak_satcat_table(Database, df):", "def load_celestrak_satcat_data(): log.info(\"Fetching CELESTRAK SATCAT data and loading into memory...\") satcat_url = \"https://www.celestrak.com/pub/satcat.txt\"", "+ 1 if len(data_batch) > 0: db.add_ucs_satdb_batch(data_batch) def parse_celestrak_row(line): intl_desg = line[0:11] norad_number", "delimiter=r\"\\n\", encoding=\"Windows-1252\" ) data = [] for row in satcat.itertuples(index=False, name=None): row =", "], ) df.set_index(\"norad_num\") return df def fix_discrepencies(satdb, satcat): log.info(\"Fixing discrepencies in the UCS", "multiple_name_flag = 0 else: multiple_name_flag = 1 payload_flag = line[20] if not payload_flag:", "multiple_name_flag: multiple_name_flag = 0 else: multiple_name_flag = 1 payload_flag = line[20] if not", "\"orbit_period_minutes\", \"inclination_deg\", \"apogee\", \"perigee\", \"radar_crosssec\", \"orbit_status_code\", ], ) df.set_index(\"norad_num\") return df def fix_discrepencies(satdb,", "engine=\"python\", delimiter=r\"\\n\", encoding=\"Windows-1252\" ) data = [] for row in satcat.itertuples(index=False, name=None): row", "return None if type(val).__module__ == \"numpy\": val = val.item() if type(val) is int", "{'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'} def", "val.item() if type(val) is int or type(val) is float: return val val =", "StringIO import pandas as pd import requests from datetime import datetime import logging", "pandas as pd import requests from datetime import datetime import logging log =", "[] for row in df.itertuples(index=False, name=None): record_fingerprint = fingerprint_line(\"\".join(str(e) for e in row))", "\"perigee\", \"radar_crosssec\", \"orbit_status_code\", ], ) df.set_index(\"norad_num\") return df def fix_discrepencies(satdb, satcat): log.info(\"Fixing discrepencies", "= satcat.loc[norad_number] satdb.loc[i, \"name\"] = satcat_row.loc[\"name\"] satdb.loc[i, \"perigee_km\"] = satcat_row.loc[\"perigee\"] satdb.loc[i, \"apogee_km\"] =", "db.add_celestrak_satcat_batch(data_batch) # make it print to the console. console = logging.StreamHandler() log.addHandler(console) log.setLevel(logging.DEBUG)", "return val val = val.strip() try: return int(val.replace(\",\", \"\")) except: pass try: return", "print to the console. 
console = logging.StreamHandler() log.addHandler(console) log.setLevel(logging.DEBUG) db = database.Database(CONFIG) db.create_celestrak_satcat_table()", "following 4 lines are necessary until our modules are public import inspect currentdir", "\"orbit_type\", \"GEO_longitude\", \"perigee_km\", \"apogee_km\", \"eccentricity\", \"inclination_degrees\", \"period_minutes\", \"launch_mass_kg\", \"dry_mass_kg\", \"power_watts\", \"launch_date\", \"expected_lifetime_years\", \"contractor\",", "delimiter=\"\\t\", encoding=\"Windows-1252\") # satdb = pd.read_csv(satdb_url, delimiter=\"\\t\", encoding=\"Windows-1252\") satdb = satdb.iloc[:, :35] satdb.applymap(format)", "until our modules are public import inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir)) import database", "modules are public import inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir)) import database from io", "not found in the Celestrak Catalog. Relying on SatDB data only.\"\"\" ) return", "df.set_index(\"norad_num\") return df def fix_discrepencies(satdb, satcat): log.info(\"Fixing discrepencies in the UCS data...\") #", "+ [record_fingerprint] data_batch.append(savable) total_rows = total_rows + 1 if len(data_batch) > 0: db.add_ucs_satdb_batch(data_batch)", "s=requests.get(satdb_url, headers= http_headers).text satdb=pd.read_csv(StringIO(s), sep=\";\", delimiter=\"\\t\", encoding=\"Windows-1252\") # satdb = pd.read_csv(satdb_url, delimiter=\"\\t\", encoding=\"Windows-1252\")", "savable = [format(i) for i in row] + [record_fingerprint] data_batch.append(savable) if len(data_batch) >", "\"launch_vehicle\", \"international_designator\", \"norad_number\", \"comments\", \"detailed_comments\", \"source_1\", \"source_2\", \"source_3\", \"source_4\", \"source_5\", \"source_6\", \"source_7\", ]", "val = val.strip() try: return int(val.replace(\",\", \"\")) except: pass try: return float(val.replace(\",\", \"\"))", "\"period_minutes\"] = satcat_row.loc[\"orbit_period_minutes\"] satdb.loc[i, \"launch_date\"] = satcat_row.loc[\"launch_date\"] satdb.loc[i, \"launch_site\"] = satcat_row.loc[\"launch_site\"] satdb.loc[i, \"international_designator\"]", "satcat = load_celestrak_satcat_data() update_ucs_satdb_raw_table(db, satdb) update_celestrak_satcat_table(db, satcat) satdb = fix_discrepencies(satdb, satcat) update_ucs_satdb_table(db, satdb)", "= 1 payload_flag = line[20] if not payload_flag: payload_flag = 0 else: payload_flag", "Catalog. 
Relying on SatDB data only.\"\"\" ) return satdb def format(val): if pd.isna(val):", "data only.\"\"\" ) return satdb def format(val): if pd.isna(val): return None if type(val).__module__", "from io import StringIO import pandas as pd import requests from datetime import", "df.itertuples(index=False, name=None): record_fingerprint = fingerprint_line(\"\".join(str(e) for e in row)) savable = [format(i) for", "encoding=\"Windows-1252\" ) data = [] for row in satcat.itertuples(index=False, name=None): row = [format(q)", "= satcat_row.loc[\"perigee\"] satdb.loc[i, \"apogee_km\"] = satcat_row.loc[\"apogee\"] satdb.loc[i, \"inclination_degrees\"] = satcat_row.loc[\"inclination_deg\"] satdb.loc[i, \"period_minutes\"] =", "pass try: return datetime.strptime(val, \"%Y/%m/%d\").date() except: pass if not val or val ==", "[format(i) for i in row] + [record_fingerprint] data_batch.append(savable) if len(data_batch) > 0: db.add_celestrak_satcat_batch(data_batch)", "intl_desg, norad_number, multiple_name_flag, payload_flag, ops_status_code, name, source, launch_date, launch_site, decay_date, orbit_period_minutes, inclination_deg, apogee,", "row)) savable = [format(i) for i in row] + [record_fingerprint] data_batch.append(savable) if len(data_batch)", "orbit_status_code, ) return satcat_tuple def update_celestrak_satcat_table(Database, df): log.info(\"Updating the celestrak_satcat table...\") data_batch =", "inclination_deg = line[96:101] apogee = line[103:109] perigee = line[111:117] radar_crosssec = line[119:127] orbit_status_code", "orbit_period_minutes, inclination_deg, apogee, perigee, radar_crosssec, orbit_status_code, ) return satcat_tuple def update_celestrak_satcat_table(Database, df): log.info(\"Updating", "BLAH BLAH\" except (KeyError, ValueError): log.warning( f\"\"\"Satellite with norad number {norad_number} in satdb", "+ 1 if len(data_batch) > 0: db.add_ucs_satdb_raw_batch(data_batch) def update_ucs_satdb_table(Database, df): log.info(\"Updating the (corrected)", "val val = val.strip() try: return int(val.replace(\",\", \"\")) except: pass try: return float(val.replace(\",\",", "apogee = line[103:109] perigee = line[111:117] radar_crosssec = line[119:127] orbit_status_code = line[129:132] satcat_tuple", "log.addHandler(console) log.setLevel(logging.DEBUG) db = database.Database(CONFIG) db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table() db.create_ucs_satdb_table() satdb = load_ucs_satdb_data() satcat =", "for q in parse_celestrak_row(row[0])] data.append(row) df = pd.DataFrame( data, columns=[ \"intl_desg\", \"norad_num\", \"multiple_name_flag\",", "= pd.read_csv(satdb_url, delimiter=\"\\t\", encoding=\"Windows-1252\") satdb = satdb.iloc[:, :35] satdb.applymap(format) satdb.columns = [ \"name\",", "BLAH\" except (KeyError, ValueError): log.warning( f\"\"\"Satellite with norad number {norad_number} in satdb is", "= logging.getLogger(__name__) CONFIG = os.path.abspath(\"../../trusat-config.yaml\") # Use this as our browser, to get", "except: pass try: return datetime.strptime(val, \"%m/%d/%y\").date() except: pass try: return datetime.strptime(val, \"%m/%d/%Y\").date() except:", "it print to the console. 
console = logging.StreamHandler() log.addHandler(console) log.setLevel(logging.DEBUG) db = database.Database(CONFIG)", "1 payload_flag = line[20] if not payload_flag: payload_flag = 0 else: payload_flag =", "try: return int(val.replace(\",\", \"\")) except: pass try: return float(val.replace(\",\", \"\")) except: pass try:", "logging log = logging.getLogger(__name__) CONFIG = os.path.abspath(\"../../trusat-config.yaml\") # Use this as our browser,", "satdb = pd.read_csv(satdb_url, delimiter=\"\\t\", encoding=\"Windows-1252\") satdb = satdb.iloc[:, :35] satdb.applymap(format) satdb.columns = [", "try: return datetime.strptime(val, \"%Y/%m/%d\").date() except: pass if not val or val == \"N/A\":", "discrepencies in the UCS data...\") # discrepencies_url = \"http://celestrak.com/pub/UCS-SD-Discrepancies.txt\" for i, satdb_row in", "\"launch_date\", \"expected_lifetime_years\", \"contractor\", \"contractor_country\", \"launch_site\", \"launch_vehicle\", \"international_designator\", \"norad_number\", \"comments\", \"detailed_comments\", \"source_1\", \"source_2\", \"source_3\",", "Gecko) Chrome/56.0.2924.76 Safari/537.36'} def fingerprint_line(line): \"\"\" Creates a unique signature from a line.\"\"\"", "101) < 3: satdb.loc[i, \"name\"] = \"BLAH BLAH BLAH\" except (KeyError, ValueError): log.warning(", "\"payload_flag\", \"ops_status_code\", \"name\", \"source\", \"launch_date\", \"launch_site\", \"decay_date\", \"orbit_period_minutes\", \"inclination_deg\", \"apogee\", \"perigee\", \"radar_crosssec\", \"orbit_status_code\",", "satdb = load_ucs_satdb_data() satcat = load_celestrak_satcat_data() update_ucs_satdb_raw_table(db, satdb) update_celestrak_satcat_table(db, satcat) satdb = fix_discrepencies(satdb,", "signature from a line.\"\"\" return md5(line.encode(\"utf-8\")).hexdigest() def load_ucs_satdb_data(): log.info(\"Fetching UCSATDB data and loading", "= line[23:47] source = line[49:54] launch_date = line[56:66] launch_site = line[69:73] decay_date =", "None return val def update_ucs_satdb_raw_table(Database, df): log.info(\"Updating the ucs_satdb_raw table...\") total_rows = 0", "import sys # The following 4 lines are necessary until our modules are", "return datetime.strptime(val, \"%Y/%m/%d\").date() except: pass if not val or val == \"N/A\": return", "if len(data_batch) > 0: db.add_ucs_satdb_raw_batch(data_batch) def update_ucs_satdb_table(Database, df): log.info(\"Updating the (corrected) ucs_satdb table...\")", "(KeyError, ValueError): log.warning( f\"\"\"Satellite with norad number {norad_number} in satdb is not found", "satdb def format(val): if pd.isna(val): return None if type(val).__module__ == \"numpy\": val =", "data_batch = [] for row in df.itertuples(index=False, name=None): record_fingerprint = fingerprint_line(\"\".join(str(e) for e", "if not payload_flag: payload_flag = 0 else: payload_flag = 1 ops_status_code = line[21]", "= val.strip() try: return int(val.replace(\",\", \"\")) except: pass try: return float(val.replace(\",\", \"\")) except:", "into memory...\") satcat_url = \"https://www.celestrak.com/pub/satcat.txt\" satcat = pd.read_csv( satcat_url, engine=\"python\", delimiter=r\"\\n\", encoding=\"Windows-1252\" )", "= fingerprint_line(\"\".join(str(e) for e in row)) savable = [format(i) for i in row]", "log = logging.getLogger(__name__) CONFIG = os.path.abspath(\"../../trusat-config.yaml\") # Use this as our browser, to", "pd.read_csv(satdb_url, delimiter=\"\\t\", encoding=\"Windows-1252\") satdb = satdb.iloc[:, :35] satdb.applymap(format) satdb.columns = [ 
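# A quick sanity sketch of format()'s coercion order; the sample values are
# illustrative, not taken from either catalog:
#
#   format("1,234")    -> 1234                       (comma-grouped int)
#   format("1,234.5")  -> 1234.5                     (comma-grouped float)
#   format("4/1/19")   -> datetime.date(2019, 4, 1)  (matches "%m/%d/%y")
#   format("N/A")      -> None                       (explicit missing marker)
#   format("  LEO ")   -> "LEO"                      (stripped passthrough)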
\"name\", \"country_registered\",", "= [format(i) for i in row] + [record_fingerprint] data_batch.append(savable) total_rows = total_rows +", "\"detailed_comments\", \"source_1\", \"source_2\", \"source_3\", \"source_4\", \"source_5\", \"source_6\", \"source_7\", ] return satdb def load_celestrak_satcat_data():", "or val == \"N/A\": return None return val def update_ucs_satdb_raw_table(Database, df): log.info(\"Updating the", "def update_celestrak_satcat_table(Database, df): log.info(\"Updating the celestrak_satcat table...\") data_batch = [] for row in", "\"international_designator\", \"norad_number\", \"comments\", \"detailed_comments\", \"source_1\", \"source_2\", \"source_3\", \"source_4\", \"source_5\", \"source_6\", \"source_7\", ] return", "+ [record_fingerprint] data_batch.append(savable) if len(data_batch) > 0: db.add_celestrak_satcat_batch(data_batch) # make it print to", "log.info(\"Fetching UCSATDB data and loading into memory...\") # satdb_url = \"https://s3.amazonaws.com/ucs-documents/nuclear-weapons/sat-database/5-9-19-update/UCS_Satellite_Database_4-1-2019.txt\" satdb_url =", "\"multiple_name_flag\", \"payload_flag\", \"ops_status_code\", \"name\", \"source\", \"launch_date\", \"launch_site\", \"decay_date\", \"orbit_period_minutes\", \"inclination_deg\", \"apogee\", \"perigee\", \"radar_crosssec\",", "= line[56:66] launch_site = line[69:73] decay_date = line[75:85] orbit_period_minutes = line[87:94] inclination_deg =", "data = [] for row in satcat.itertuples(index=False, name=None): row = [format(q) for q", "multiple_name_flag, payload_flag, ops_status_code, name, source, launch_date, launch_site, decay_date, orbit_period_minutes, inclination_deg, apogee, perigee, radar_crosssec,", "except: pass try: return datetime.strptime(val, \"%Y/%m/%d\").date() except: pass if not val or val", "\"inclination_degrees\", \"period_minutes\", \"launch_mass_kg\", \"dry_mass_kg\", \"power_watts\", \"launch_date\", \"expected_lifetime_years\", \"contractor\", \"contractor_country\", \"launch_site\", \"launch_vehicle\", \"international_designator\", \"norad_number\",", "this as our browser, to get past UCS 403 errors http_headers = {'User-Agent':", "is float: return val val = val.strip() try: return int(val.replace(\",\", \"\")) except: pass", "Creates a unique signature from a line.\"\"\" return md5(line.encode(\"utf-8\")).hexdigest() def load_ucs_satdb_data(): log.info(\"Fetching UCSATDB", "norad_number = format(satdb_row.loc[\"norad_number\"]) try: satcat_row = satcat.loc[norad_number] satdb.loc[i, \"name\"] = satcat_row.loc[\"name\"] satdb.loc[i, \"perigee_km\"]", "= satcat_row.loc[\"orbit_period_minutes\"] satdb.loc[i, \"launch_date\"] = satcat_row.loc[\"launch_date\"] satdb.loc[i, \"launch_site\"] = satcat_row.loc[\"launch_site\"] satdb.loc[i, \"international_designator\"] =", "the ucs_satdb_raw table...\") total_rows = 0 data_batch = [] for row in df.itertuples(index=False,", "launch_site, decay_date, orbit_period_minutes, inclination_deg, apogee, perigee, radar_crosssec, orbit_status_code, ) return satcat_tuple def update_celestrak_satcat_table(Database,", "\"BLAH BLAH BLAH\" except (KeyError, ValueError): log.warning( f\"\"\"Satellite with norad number {norad_number} in", "fix_discrepencies(satdb, satcat): log.info(\"Fixing discrepencies in the UCS data...\") # discrepencies_url = \"http://celestrak.com/pub/UCS-SD-Discrepancies.txt\" for", "log.info(\"Updating the celestrak_satcat table...\") data_batch = [] for row in df.itertuples(index=False, name=None): record_fingerprint", 
"return None return val def update_ucs_satdb_raw_table(Database, df): log.info(\"Updating the ucs_satdb_raw table...\") total_rows =", "are public import inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir)) import database from io import", "\"ops_status_code\", \"name\", \"source\", \"launch_date\", \"launch_site\", \"decay_date\", \"orbit_period_minutes\", \"inclination_deg\", \"apogee\", \"perigee\", \"radar_crosssec\", \"orbit_status_code\", ],", "\"launch_mass_kg\", \"dry_mass_kg\", \"power_watts\", \"launch_date\", \"expected_lifetime_years\", \"contractor\", \"contractor_country\", \"launch_site\", \"launch_vehicle\", \"international_designator\", \"norad_number\", \"comments\", \"detailed_comments\",", "\"orbit_status_code\", ], ) df.set_index(\"norad_num\") return df def fix_discrepencies(satdb, satcat): log.info(\"Fixing discrepencies in the", "errors http_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)", "satdb.loc[i, \"inclination_degrees\"] = satcat_row.loc[\"inclination_deg\"] satdb.loc[i, \"period_minutes\"] = satcat_row.loc[\"orbit_period_minutes\"] satdb.loc[i, \"launch_date\"] = satcat_row.loc[\"launch_date\"] satdb.loc[i,", "celestrak_satcat table...\") data_batch = [] for row in df.itertuples(index=False, name=None): record_fingerprint = fingerprint_line(\"\".join(str(e)", "satcat_row.loc[\"launch_date\"] satdb.loc[i, \"launch_site\"] = satcat_row.loc[\"launch_site\"] satdb.loc[i, \"international_designator\"] = satcat_row.loc[\"intl_desg\"] import random if random.randint(1,", "\"source_3\", \"source_4\", \"source_5\", \"source_6\", \"source_7\", ] return satdb def load_celestrak_satcat_data(): log.info(\"Fetching CELESTRAK SATCAT", "for row in df.itertuples(index=False, name=None): record_fingerprint = fingerprint_line(\"\".join(str(e) for e in row)) savable", "db.add_ucs_satdb_raw_batch(data_batch) def update_ucs_satdb_table(Database, df): log.info(\"Updating the (corrected) ucs_satdb table...\") total_rows = 0 data_batch", "except: pass try: return float(val.replace(\",\", \"\")) except: pass try: return datetime.strptime(val, \"%m/%d/%y\").date() except:", "= val.item() if type(val) is int or type(val) is float: return val val", "log.info(\"Fetching CELESTRAK SATCAT data and loading into memory...\") satcat_url = \"https://www.celestrak.com/pub/satcat.txt\" satcat =", "Celestrak Catalog. Relying on SatDB data only.\"\"\" ) return satdb def format(val): if", "= satcat_row.loc[\"launch_site\"] satdb.loc[i, \"international_designator\"] = satcat_row.loc[\"intl_desg\"] import random if random.randint(1, 101) < 3:", "= line[69:73] decay_date = line[75:85] orbit_period_minutes = line[87:94] inclination_deg = line[96:101] apogee =", "line[129:132] satcat_tuple = ( intl_desg, norad_number, multiple_name_flag, payload_flag, ops_status_code, name, source, launch_date, launch_site,", "= satcat_row.loc[\"launch_date\"] satdb.loc[i, \"launch_site\"] = satcat_row.loc[\"launch_site\"] satdb.loc[i, \"international_designator\"] = satcat_row.loc[\"intl_desg\"] import random if", "console. 
console = logging.StreamHandler() log.addHandler(console) log.setLevel(logging.DEBUG) db = database.Database(CONFIG) db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table() db.create_ucs_satdb_table() satdb", "return val def update_ucs_satdb_raw_table(Database, df): log.info(\"Updating the ucs_satdb_raw table...\") total_rows = 0 data_batch", "line[49:54] launch_date = line[56:66] launch_site = line[69:73] decay_date = line[75:85] orbit_period_minutes = line[87:94]", "name = line[23:47] source = line[49:54] launch_date = line[56:66] launch_site = line[69:73] decay_date", "log.setLevel(logging.DEBUG) db = database.Database(CONFIG) db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table() db.create_ucs_satdb_table() satdb = load_ucs_satdb_data() satcat = load_celestrak_satcat_data()", "CONFIG = os.path.abspath(\"../../trusat-config.yaml\") # Use this as our browser, to get past UCS", "not payload_flag: payload_flag = 0 else: payload_flag = 1 ops_status_code = line[21] name", "\"launch_site\", \"launch_vehicle\", \"international_designator\", \"norad_number\", \"comments\", \"detailed_comments\", \"source_1\", \"source_2\", \"source_3\", \"source_4\", \"source_5\", \"source_6\", \"source_7\",", "in satcat.itertuples(index=False, name=None): row = [format(q) for q in parse_celestrak_row(row[0])] data.append(row) df =", "in the UCS data...\") # discrepencies_url = \"http://celestrak.com/pub/UCS-SD-Discrepancies.txt\" for i, satdb_row in satdb.iterrows():", "Relying on SatDB data only.\"\"\" ) return satdb def format(val): if pd.isna(val): return", "= os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir)) import database from io import StringIO import pandas as pd", "import datetime import logging log = logging.getLogger(__name__) CONFIG = os.path.abspath(\"../../trusat-config.yaml\") # Use this", "6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'} def fingerprint_line(line): \"\"\" Creates a", "\"apogee\", \"perigee\", \"radar_crosssec\", \"orbit_status_code\", ], ) df.set_index(\"norad_num\") return df def fix_discrepencies(satdb, satcat): log.info(\"Fixing", "source = line[49:54] launch_date = line[56:66] launch_site = line[69:73] decay_date = line[75:85] orbit_period_minutes", "in row)) savable = [format(i) for i in row] + [record_fingerprint] data_batch.append(savable) if", "= logging.StreamHandler() log.addHandler(console) log.setLevel(logging.DEBUG) db = database.Database(CONFIG) db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table() db.create_ucs_satdb_table() satdb = load_ucs_satdb_data()", "\"inclination_degrees\"] = satcat_row.loc[\"inclination_deg\"] satdb.loc[i, \"period_minutes\"] = satcat_row.loc[\"orbit_period_minutes\"] satdb.loc[i, \"launch_date\"] = satcat_row.loc[\"launch_date\"] satdb.loc[i, \"launch_site\"]", "float(val.replace(\",\", \"\")) except: pass try: return datetime.strptime(val, \"%m/%d/%y\").date() except: pass try: return datetime.strptime(val,", "like Gecko) Chrome/56.0.2924.76 Safari/537.36'} def fingerprint_line(line): \"\"\" Creates a unique signature from a", "= os.path.abspath(\"../../trusat-config.yaml\") # Use this as our browser, to get past UCS 403", "[ \"name\", \"country_registered\", \"country_owner\", \"owner_operator\", \"users\", \"purpose\", \"purpose_detailed\", \"orbit_class\", \"orbit_type\", \"GEO_longitude\", \"perigee_km\", \"apogee_km\",", "\"source_2\", \"source_3\", \"source_4\", \"source_5\", 
\"source_6\", \"source_7\", ] return satdb def load_celestrak_satcat_data(): log.info(\"Fetching CELESTRAK", "satdb.loc[i, \"launch_site\"] = satcat_row.loc[\"launch_site\"] satdb.loc[i, \"international_designator\"] = satcat_row.loc[\"intl_desg\"] import random if random.randint(1, 101)", "\"%m/%d/%Y\").date() except: pass try: return datetime.strptime(val, \"%Y/%m/%d\").date() except: pass if not val or", "satdb.loc[i, \"apogee_km\"] = satcat_row.loc[\"apogee\"] satdb.loc[i, \"inclination_degrees\"] = satcat_row.loc[\"inclination_deg\"] satdb.loc[i, \"period_minutes\"] = satcat_row.loc[\"orbit_period_minutes\"] satdb.loc[i,", "= database.Database(CONFIG) db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table() db.create_ucs_satdb_table() satdb = load_ucs_satdb_data() satcat = load_celestrak_satcat_data() update_ucs_satdb_raw_table(db, satdb)", "past UCS 403 errors http_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36", "else: multiple_name_flag = 1 payload_flag = line[20] if not payload_flag: payload_flag = 0", "= line[49:54] launch_date = line[56:66] launch_site = line[69:73] decay_date = line[75:85] orbit_period_minutes =", "\"source_4\", \"source_5\", \"source_6\", \"source_7\", ] return satdb def load_celestrak_satcat_data(): log.info(\"Fetching CELESTRAK SATCAT data", "datetime import logging log = logging.getLogger(__name__) CONFIG = os.path.abspath(\"../../trusat-config.yaml\") # Use this as", "val.strip() try: return int(val.replace(\",\", \"\")) except: pass try: return float(val.replace(\",\", \"\")) except: pass", "NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'} def fingerprint_line(line): \"\"\" Creates", "as our browser, to get past UCS 403 errors http_headers = {'User-Agent': 'Mozilla/5.0", "\"contractor\", \"contractor_country\", \"launch_site\", \"launch_vehicle\", \"international_designator\", \"norad_number\", \"comments\", \"detailed_comments\", \"source_1\", \"source_2\", \"source_3\", \"source_4\", \"source_5\",", "\"\")) except: pass try: return float(val.replace(\",\", \"\")) except: pass try: return datetime.strptime(val, \"%m/%d/%y\").date()", "name=None): record_fingerprint = fingerprint_line(\"\".join(str(e) for e in row)) savable = [format(i) for i", "satdb = satdb.iloc[:, :35] satdb.applymap(format) satdb.columns = [ \"name\", \"country_registered\", \"country_owner\", \"owner_operator\", \"users\",", "0 else: payload_flag = 1 ops_status_code = line[21] name = line[23:47] source =", "except: pass if not val or val == \"N/A\": return None return val", "import random if random.randint(1, 101) < 3: satdb.loc[i, \"name\"] = \"BLAH BLAH BLAH\"", "headers= http_headers).text satdb=pd.read_csv(StringIO(s), sep=\";\", delimiter=\"\\t\", encoding=\"Windows-1252\") # satdb = pd.read_csv(satdb_url, delimiter=\"\\t\", encoding=\"Windows-1252\") satdb", "type(val) is int or type(val) is float: return val val = val.strip() try:", "# Use this as our browser, to get past UCS 403 errors http_headers", "\"http://celestrak.com/pub/UCS-SD-Discrepancies.txt\" for i, satdb_row in satdb.iterrows(): norad_number = format(satdb_row.loc[\"norad_number\"]) try: satcat_row = satcat.loc[norad_number]", "line[103:109] perigee = line[111:117] radar_crosssec = line[119:127] orbit_status_code = line[129:132] satcat_tuple = (", "on SatDB data only.\"\"\" ) return satdb def format(val): if pd.isna(val): return None", "table...\") total_rows = 0 data_batch = [] for row in df.itertuples(index=False, name=None): 
record_fingerprint", "type(val) is float: return val val = val.strip() try: return int(val.replace(\",\", \"\")) except:", "# satdb = pd.read_csv(satdb_url, delimiter=\"\\t\", encoding=\"Windows-1252\") satdb = satdb.iloc[:, :35] satdb.applymap(format) satdb.columns =", "pd.isna(val): return None if type(val).__module__ == \"numpy\": val = val.item() if type(val) is", "satcat_row.loc[\"name\"] satdb.loc[i, \"perigee_km\"] = satcat_row.loc[\"perigee\"] satdb.loc[i, \"apogee_km\"] = satcat_row.loc[\"apogee\"] satdb.loc[i, \"inclination_degrees\"] = satcat_row.loc[\"inclination_deg\"]", "data, columns=[ \"intl_desg\", \"norad_num\", \"multiple_name_flag\", \"payload_flag\", \"ops_status_code\", \"name\", \"source\", \"launch_date\", \"launch_site\", \"decay_date\", \"orbit_period_minutes\",", "line[0:11] norad_number = line[13:18] multiple_name_flag = line[19] if not multiple_name_flag: multiple_name_flag = 0", "import logging log = logging.getLogger(__name__) CONFIG = os.path.abspath(\"../../trusat-config.yaml\") # Use this as our", "= satdb.iloc[:, :35] satdb.applymap(format) satdb.columns = [ \"name\", \"country_registered\", \"country_owner\", \"owner_operator\", \"users\", \"purpose\",", "console = logging.StreamHandler() log.addHandler(console) log.setLevel(logging.DEBUG) db = database.Database(CONFIG) db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table() db.create_ucs_satdb_table() satdb =", "not multiple_name_flag: multiple_name_flag = 0 else: multiple_name_flag = 1 payload_flag = line[20] if", "norad number {norad_number} in satdb is not found in the Celestrak Catalog. Relying", "if type(val) is int or type(val) is float: return val val = val.strip()", "number {norad_number} in satdb is not found in the Celestrak Catalog. Relying on", "necessary until our modules are public import inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir)) import", "unique signature from a line.\"\"\" return md5(line.encode(\"utf-8\")).hexdigest() def load_ucs_satdb_data(): log.info(\"Fetching UCSATDB data and", "= line[19] if not multiple_name_flag: multiple_name_flag = 0 else: multiple_name_flag = 1 payload_flag", "= pd.read_csv( satcat_url, engine=\"python\", delimiter=r\"\\n\", encoding=\"Windows-1252\" ) data = [] for row in", "the (corrected) ucs_satdb table...\") total_rows = 0 data_batch = [] for row in", "= pd.DataFrame( data, columns=[ \"intl_desg\", \"norad_num\", \"multiple_name_flag\", \"payload_flag\", \"ops_status_code\", \"name\", \"source\", \"launch_date\", \"launch_site\",", "len(data_batch) > 0: db.add_celestrak_satcat_batch(data_batch) # make it print to the console. 
console =", "public import inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir)) import database from io import StringIO", "import md5 import os import sys # The following 4 lines are necessary", "log.info(\"Updating the ucs_satdb_raw table...\") total_rows = 0 data_batch = [] for row in", "= line[87:94] inclination_deg = line[96:101] apogee = line[103:109] perigee = line[111:117] radar_crosssec =", "= \"BLAH BLAH BLAH\" except (KeyError, ValueError): log.warning( f\"\"\"Satellite with norad number {norad_number}", "CELESTRAK SATCAT data and loading into memory...\") satcat_url = \"https://www.celestrak.com/pub/satcat.txt\" satcat = pd.read_csv(", "import inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir)) import database from io import StringIO import", "line[13:18] multiple_name_flag = line[19] if not multiple_name_flag: multiple_name_flag = 0 else: multiple_name_flag =", "ValueError): log.warning( f\"\"\"Satellite with norad number {norad_number} in satdb is not found in", "db.create_ucs_satdb_table() satdb = load_ucs_satdb_data() satcat = load_celestrak_satcat_data() update_ucs_satdb_raw_table(db, satdb) update_celestrak_satcat_table(db, satcat) satdb =", "name, source, launch_date, launch_site, decay_date, orbit_period_minutes, inclination_deg, apogee, perigee, radar_crosssec, orbit_status_code, ) return", ") data = [] for row in satcat.itertuples(index=False, name=None): row = [format(q) for", "python from hashlib import md5 import os import sys # The following 4", "pass try: return datetime.strptime(val, \"%m/%d/%y\").date() except: pass try: return datetime.strptime(val, \"%m/%d/%Y\").date() except: pass", "satcat.itertuples(index=False, name=None): row = [format(q) for q in parse_celestrak_row(row[0])] data.append(row) df = pd.DataFrame(", "line[69:73] decay_date = line[75:85] orbit_period_minutes = line[87:94] inclination_deg = line[96:101] apogee = line[103:109]", "\"launch_date\", \"launch_site\", \"decay_date\", \"orbit_period_minutes\", \"inclination_deg\", \"apogee\", \"perigee\", \"radar_crosssec\", \"orbit_status_code\", ], ) df.set_index(\"norad_num\") return", "pass if not val or val == \"N/A\": return None return val def", "total_rows + 1 if len(data_batch) > 0: db.add_ucs_satdb_batch(data_batch) def parse_celestrak_row(line): intl_desg = line[0:11]", "fingerprint_line(line): \"\"\" Creates a unique signature from a line.\"\"\" return md5(line.encode(\"utf-8\")).hexdigest() def load_ucs_satdb_data():", "os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir)) import database from io import StringIO import pandas as pd import", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'} def fingerprint_line(line): \"\"\" Creates a unique signature", "> 0: db.add_ucs_satdb_raw_batch(data_batch) def update_ucs_satdb_table(Database, df): log.info(\"Updating the (corrected) ucs_satdb table...\") total_rows =", "database.Database(CONFIG) db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table() db.create_ucs_satdb_table() satdb = load_ucs_satdb_data() satcat = load_celestrak_satcat_data() update_ucs_satdb_raw_table(db, satdb) update_celestrak_satcat_table(db,", "pass try: return float(val.replace(\",\", \"\")) except: pass try: return datetime.strptime(val, \"%m/%d/%y\").date() except: pass", "Use this as our 
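# Illustrative check of the fingerprint idea; the joined row below is made
# up, not a real catalog record. Identical input always yields the same
# 32-character md5 hex digest, so an unchanged record reproduces its stored
# fingerprint on a later run:
#
#   fingerprint_line("a|b|c") == fingerprint_line("a|b|c")   # True
#   len(fingerprint_line("a|b|c"))                           # 32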

def update_celestrak_satcat_table(db, df):
    log.info("Updating the celestrak_satcat table...")
    data_batch = []
    for row in df.itertuples(index=False, name=None):
        record_fingerprint = fingerprint_line("".join(str(e) for e in row))
        savable = [format(i) for i in row] + [record_fingerprint]
        data_batch.append(savable)
    if len(data_batch) > 0:
        db.add_celestrak_satcat_batch(data_batch)


# Make the log output print to the console.
console = logging.StreamHandler()
log.addHandler(console)
log.setLevel(logging.DEBUG)

db = database.Database(CONFIG)
db.create_celestrak_satcat_table()
db.create_ucs_satdb_raw_table()
db.create_ucs_satdb_table()

satdb = load_ucs_satdb_data()
satcat = load_celestrak_satcat_data()

update_ucs_satdb_raw_table(db, satdb)
update_celestrak_satcat_table(db, satcat)

satdb = fix_discrepancies(satdb, satcat)
update_ucs_satdb_table(db, satdb)

log.info("Script Complete")
sys.exit(0)
row)) savable =", "= line[119:127] orbit_status_code = line[129:132] satcat_tuple = ( intl_desg, norad_number, multiple_name_flag, payload_flag, ops_status_code,", "\"owner_operator\", \"users\", \"purpose\", \"purpose_detailed\", \"orbit_class\", \"orbit_type\", \"GEO_longitude\", \"perigee_km\", \"apogee_km\", \"eccentricity\", \"inclination_degrees\", \"period_minutes\", \"launch_mass_kg\",", "\"contractor_country\", \"launch_site\", \"launch_vehicle\", \"international_designator\", \"norad_number\", \"comments\", \"detailed_comments\", \"source_1\", \"source_2\", \"source_3\", \"source_4\", \"source_5\", \"source_6\",", "satcat = pd.read_csv( satcat_url, engine=\"python\", delimiter=r\"\\n\", encoding=\"Windows-1252\" ) data = [] for row", "= [format(i) for i in row] + [record_fingerprint] data_batch.append(savable) if len(data_batch) > 0:", "satdb.loc[i, \"name\"] = \"BLAH BLAH BLAH\" except (KeyError, ValueError): log.warning( f\"\"\"Satellite with norad", "for e in row)) savable = [format(i) for i in row] + [record_fingerprint]", "1 if len(data_batch) > 0: db.add_ucs_satdb_raw_batch(data_batch) def update_ucs_satdb_table(Database, df): log.info(\"Updating the (corrected) ucs_satdb", "with norad number {norad_number} in satdb is not found in the Celestrak Catalog.", "ops_status_code, name, source, launch_date, launch_site, decay_date, orbit_period_minutes, inclination_deg, apogee, perigee, radar_crosssec, orbit_status_code, )", "\"country_registered\", \"country_owner\", \"owner_operator\", \"users\", \"purpose\", \"purpose_detailed\", \"orbit_class\", \"orbit_type\", \"GEO_longitude\", \"perigee_km\", \"apogee_km\", \"eccentricity\", \"inclination_degrees\",", "memory...\") # satdb_url = \"https://s3.amazonaws.com/ucs-documents/nuclear-weapons/sat-database/5-9-19-update/UCS_Satellite_Database_4-1-2019.txt\" satdb_url = \"https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt\" # https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error s=requests.get(satdb_url, headers= http_headers).text", "return datetime.strptime(val, \"%m/%d/%y\").date() except: pass try: return datetime.strptime(val, \"%m/%d/%Y\").date() except: pass try: return", "are necessary until our modules are public import inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(1,os.path.dirname(currentdir))", "http_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76", "\"name\", \"country_registered\", \"country_owner\", \"owner_operator\", \"users\", \"purpose\", \"purpose_detailed\", \"orbit_class\", \"orbit_type\", \"GEO_longitude\", \"perigee_km\", \"apogee_km\", \"eccentricity\",", "satcat_url = \"https://www.celestrak.com/pub/satcat.txt\" satcat = pd.read_csv( satcat_url, engine=\"python\", delimiter=r\"\\n\", encoding=\"Windows-1252\" ) data =", "random if random.randint(1, 101) < 3: satdb.loc[i, \"name\"] = \"BLAH BLAH BLAH\" except", "encoding=\"Windows-1252\") satdb = satdb.iloc[:, :35] satdb.applymap(format) satdb.columns = [ \"name\", \"country_registered\", \"country_owner\", \"owner_operator\",", "to get past UCS 403 errors http_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1;", "try: satcat_row = satcat.loc[norad_number] satdb.loc[i, \"name\"] = satcat_row.loc[\"name\"] satdb.loc[i, \"perigee_km\"] = satcat_row.loc[\"perigee\"] satdb.loc[i,", "val or val == \"N/A\": return None return 
val def update_ucs_satdb_raw_table(Database, df): log.info(\"Updating", "perigee = line[111:117] radar_crosssec = line[119:127] orbit_status_code = line[129:132] satcat_tuple = ( intl_desg,", "= satcat_row.loc[\"apogee\"] satdb.loc[i, \"inclination_degrees\"] = satcat_row.loc[\"inclination_deg\"] satdb.loc[i, \"period_minutes\"] = satcat_row.loc[\"orbit_period_minutes\"] satdb.loc[i, \"launch_date\"] =", "in row] + [record_fingerprint] data_batch.append(savable) if len(data_batch) > 0: db.add_celestrak_satcat_batch(data_batch) # make it", "to the console. console = logging.StreamHandler() log.addHandler(console) log.setLevel(logging.DEBUG) db = database.Database(CONFIG) db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table()", "<gh_stars>10-100 #!/usr/bin/env python from hashlib import md5 import os import sys # The", "pd.read_csv( satcat_url, engine=\"python\", delimiter=r\"\\n\", encoding=\"Windows-1252\" ) data = [] for row in satcat.itertuples(index=False,", "type(val).__module__ == \"numpy\": val = val.item() if type(val) is int or type(val) is", "payload_flag = line[20] if not payload_flag: payload_flag = 0 else: payload_flag = 1", "datetime.strptime(val, \"%Y/%m/%d\").date() except: pass if not val or val == \"N/A\": return None", "import pandas as pd import requests from datetime import datetime import logging log", "\"perigee_km\", \"apogee_km\", \"eccentricity\", \"inclination_degrees\", \"period_minutes\", \"launch_mass_kg\", \"dry_mass_kg\", \"power_watts\", \"launch_date\", \"expected_lifetime_years\", \"contractor\", \"contractor_country\", \"launch_site\",", "log.info(\"Fixing discrepencies in the UCS data...\") # discrepencies_url = \"http://celestrak.com/pub/UCS-SD-Discrepancies.txt\" for i, satdb_row", "1 ops_status_code = line[21] name = line[23:47] source = line[49:54] launch_date = line[56:66]", "intl_desg = line[0:11] norad_number = line[13:18] multiple_name_flag = line[19] if not multiple_name_flag: multiple_name_flag", "data_batch.append(savable) if len(data_batch) > 0: db.add_celestrak_satcat_batch(data_batch) # make it print to the console.", "import StringIO import pandas as pd import requests from datetime import datetime import", "sys.path.insert(1,os.path.dirname(currentdir)) import database from io import StringIO import pandas as pd import requests", "parse_celestrak_row(line): intl_desg = line[0:11] norad_number = line[13:18] multiple_name_flag = line[19] if not multiple_name_flag:", "from hashlib import md5 import os import sys # The following 4 lines", "satdb is not found in the Celestrak Catalog. 
Relying on SatDB data only.\"\"\"", "payload_flag = 1 ops_status_code = line[21] name = line[23:47] source = line[49:54] launch_date", "orbit_status_code = line[129:132] satcat_tuple = ( intl_desg, norad_number, multiple_name_flag, payload_flag, ops_status_code, name, source,", "ucs_satdb_raw table...\") total_rows = 0 data_batch = [] for row in df.itertuples(index=False, name=None):", "db.create_celestrak_satcat_table() db.create_ucs_satdb_raw_table() db.create_ucs_satdb_table() satdb = load_ucs_satdb_data() satcat = load_celestrak_satcat_data() update_ucs_satdb_raw_table(db, satdb) update_celestrak_satcat_table(db, satcat)", "return satdb def load_celestrak_satcat_data(): log.info(\"Fetching CELESTRAK SATCAT data and loading into memory...\") satcat_url", "= line[0:11] norad_number = line[13:18] multiple_name_flag = line[19] if not multiple_name_flag: multiple_name_flag =", "satdb_url = \"https://s3.amazonaws.com/ucs-documents/nuclear-weapons/sat-database/5-9-19-update/UCS_Satellite_Database_4-1-2019.txt\" satdb_url = \"https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt\" # https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error s=requests.get(satdb_url, headers= http_headers).text satdb=pd.read_csv(StringIO(s), sep=\";\",", "< 3: satdb.loc[i, \"name\"] = \"BLAH BLAH BLAH\" except (KeyError, ValueError): log.warning( f\"\"\"Satellite", "\"perigee_km\"] = satcat_row.loc[\"perigee\"] satdb.loc[i, \"apogee_km\"] = satcat_row.loc[\"apogee\"] satdb.loc[i, \"inclination_degrees\"] = satcat_row.loc[\"inclination_deg\"] satdb.loc[i, \"period_minutes\"]", "\"expected_lifetime_years\", \"contractor\", \"contractor_country\", \"launch_site\", \"launch_vehicle\", \"international_designator\", \"norad_number\", \"comments\", \"detailed_comments\", \"source_1\", \"source_2\", \"source_3\", \"source_4\",", "os import sys # The following 4 lines are necessary until our modules", "datetime.strptime(val, \"%m/%d/%Y\").date() except: pass try: return datetime.strptime(val, \"%Y/%m/%d\").date() except: pass if not val", "\"numpy\": val = val.item() if type(val) is int or type(val) is float: return", "from datetime import datetime import logging log = logging.getLogger(__name__) CONFIG = os.path.abspath(\"../../trusat-config.yaml\") #", "discrepencies_url = \"http://celestrak.com/pub/UCS-SD-Discrepancies.txt\" for i, satdb_row in satdb.iterrows(): norad_number = format(satdb_row.loc[\"norad_number\"]) try: satcat_row", "\"source\", \"launch_date\", \"launch_site\", \"decay_date\", \"orbit_period_minutes\", \"inclination_deg\", \"apogee\", \"perigee\", \"radar_crosssec\", \"orbit_status_code\", ], ) df.set_index(\"norad_num\")", "satcat_row.loc[\"perigee\"] satdb.loc[i, \"apogee_km\"] = satcat_row.loc[\"apogee\"] satdb.loc[i, \"inclination_degrees\"] = satcat_row.loc[\"inclination_deg\"] satdb.loc[i, \"period_minutes\"] = satcat_row.loc[\"orbit_period_minutes\"]", "parse_celestrak_row(row[0])] data.append(row) df = pd.DataFrame( data, columns=[ \"intl_desg\", \"norad_num\", \"multiple_name_flag\", \"payload_flag\", \"ops_status_code\", \"name\",", "\"decay_date\", \"orbit_period_minutes\", \"inclination_deg\", \"apogee\", \"perigee\", \"radar_crosssec\", \"orbit_status_code\", ], ) df.set_index(\"norad_num\") return df def", "if random.randint(1, 101) < 3: satdb.loc[i, \"name\"] = \"BLAH BLAH BLAH\" except (KeyError,", "\"inclination_deg\", \"apogee\", \"perigee\", \"radar_crosssec\", \"orbit_status_code\", ], ) 
df.set_index(\"norad_num\") return df def fix_discrepencies(satdb, satcat):", "except (KeyError, ValueError): log.warning( f\"\"\"Satellite with norad number {norad_number} in satdb is not", "satcat_url, engine=\"python\", delimiter=r\"\\n\", encoding=\"Windows-1252\" ) data = [] for row in satcat.itertuples(index=False, name=None):", "into memory...\") # satdb_url = \"https://s3.amazonaws.com/ucs-documents/nuclear-weapons/sat-database/5-9-19-update/UCS_Satellite_Database_4-1-2019.txt\" satdb_url = \"https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt\" # https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error s=requests.get(satdb_url, headers=", "import database from io import StringIO import pandas as pd import requests from", "[record_fingerprint] data_batch.append(savable) total_rows = total_rows + 1 if len(data_batch) > 0: db.add_ucs_satdb_batch(data_batch) def", "satcat): log.info(\"Fixing discrepencies in the UCS data...\") # discrepencies_url = \"http://celestrak.com/pub/UCS-SD-Discrepancies.txt\" for i,", "line[87:94] inclination_deg = line[96:101] apogee = line[103:109] perigee = line[111:117] radar_crosssec = line[119:127]", "\"https://www.celestrak.com/pub/satcat.txt\" satcat = pd.read_csv( satcat_url, engine=\"python\", delimiter=r\"\\n\", encoding=\"Windows-1252\" ) data = [] for", "update_ucs_satdb_table(Database, df): log.info(\"Updating the (corrected) ucs_satdb table...\") total_rows = 0 data_batch = []", "df = pd.DataFrame( data, columns=[ \"intl_desg\", \"norad_num\", \"multiple_name_flag\", \"payload_flag\", \"ops_status_code\", \"name\", \"source\", \"launch_date\",", ":35] satdb.applymap(format) satdb.columns = [ \"name\", \"country_registered\", \"country_owner\", \"owner_operator\", \"users\", \"purpose\", \"purpose_detailed\", \"orbit_class\",", "inclination_deg, apogee, perigee, radar_crosssec, orbit_status_code, ) return satcat_tuple def update_celestrak_satcat_table(Database, df): log.info(\"Updating the", "None if type(val).__module__ == \"numpy\": val = val.item() if type(val) is int or", "= satcat_row.loc[\"name\"] satdb.loc[i, \"perigee_km\"] = satcat_row.loc[\"perigee\"] satdb.loc[i, \"apogee_km\"] = satcat_row.loc[\"apogee\"] satdb.loc[i, \"inclination_degrees\"] =", "decay_date = line[75:85] orbit_period_minutes = line[87:94] inclination_deg = line[96:101] apogee = line[103:109] perigee", "if type(val).__module__ == \"numpy\": val = val.item() if type(val) is int or type(val)", "satdb.applymap(format) satdb.columns = [ \"name\", \"country_registered\", \"country_owner\", \"owner_operator\", \"users\", \"purpose\", \"purpose_detailed\", \"orbit_class\", \"orbit_type\",", "columns=[ \"intl_desg\", \"norad_num\", \"multiple_name_flag\", \"payload_flag\", \"ops_status_code\", \"name\", \"source\", \"launch_date\", \"launch_site\", \"decay_date\", \"orbit_period_minutes\", \"inclination_deg\",", "pd import requests from datetime import datetime import logging log = logging.getLogger(__name__) CONFIG", "norad_number, multiple_name_flag, payload_flag, ops_status_code, name, source, launch_date, launch_site, decay_date, orbit_period_minutes, inclination_deg, apogee, perigee,", "for row in satcat.itertuples(index=False, name=None): row = [format(q) for q in parse_celestrak_row(row[0])] data.append(row)", "launch_site = line[69:73] decay_date = line[75:85] orbit_period_minutes = line[87:94] inclination_deg = line[96:101] apogee", "if not multiple_name_flag: 
multiple_name_flag = 0 else: multiple_name_flag = 1 payload_flag = line[20]", "satcat_row.loc[\"orbit_period_minutes\"] satdb.loc[i, \"launch_date\"] = satcat_row.loc[\"launch_date\"] satdb.loc[i, \"launch_site\"] = satcat_row.loc[\"launch_site\"] satdb.loc[i, \"international_designator\"] = satcat_row.loc[\"intl_desg\"]", "return md5(line.encode(\"utf-8\")).hexdigest() def load_ucs_satdb_data(): log.info(\"Fetching UCSATDB data and loading into memory...\") # satdb_url", "\"%m/%d/%y\").date() except: pass try: return datetime.strptime(val, \"%m/%d/%Y\").date() except: pass try: return datetime.strptime(val, \"%Y/%m/%d\").date()", "total_rows = 0 data_batch = [] for row in df.itertuples(index=False, name=None): record_fingerprint =", "= \"https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt\" # https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error s=requests.get(satdb_url, headers= http_headers).text satdb=pd.read_csv(StringIO(s), sep=\";\", delimiter=\"\\t\", encoding=\"Windows-1252\") # satdb", "only.\"\"\" ) return satdb def format(val): if pd.isna(val): return None if type(val).__module__ ==", "\"norad_number\", \"comments\", \"detailed_comments\", \"source_1\", \"source_2\", \"source_3\", \"source_4\", \"source_5\", \"source_6\", \"source_7\", ] return satdb", "satdb.loc[i, \"period_minutes\"] = satcat_row.loc[\"orbit_period_minutes\"] satdb.loc[i, \"launch_date\"] = satcat_row.loc[\"launch_date\"] satdb.loc[i, \"launch_site\"] = satcat_row.loc[\"launch_site\"] satdb.loc[i," ]
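As a quick sanity check of the format() normalizer above, a sketch (not part of the original script; it assumes the module's format() helper, which shadows the built-in, is in scope):

from datetime import date

assert format("1,234") == 1234                   # comma-grouped integers become int
assert format("3.14") == 3.14                    # numeric strings become float
assert format("11/15/74") == date(1974, 11, 15)  # "%m/%d/%y" dates become datetime.date
assert format("N/A") is None                     # sentinels and empty strings become None
assert format(42) == 42                          # ints and floats pass through unchanged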
import json
import requests

if __name__ == '__main__':
    # Token endpoint URL; fill in your app id, app secret, account name, and password
    response = requests.post('https://api.zhuangxiang.com/connect/token', data={
        'client_id': 'your app id',
        'client_secret': 'your app secret',
        'grant_type': 'password',
        'username': 'your account name(admin)',
        'password': '<PASSWORD>'
    }, cookies={
        'Abp.TenantId': 'your tenant id'  # set the cookie with your tenant id
    })
    access_token = json.loads(response.text)['access_token']  # read the access_token from the response

    taskdata = {  # build the data to be calculated
        'type': 0,
        'packingCargoes': [{  # cargo data
            'name': 'cargo1',
            'length': 1.1,
            'width': 0.8,
            'height': 0.6,
            'weight': 0.5,
            'quantity': 99
        }],
        'packingContainers': [{  # container data
            'name': "20ft",
            'InnerX': 2.35,
            'InnerY': 2.38,
            'InnerZ': 5.89,
            'Maxload': 21000
        }],
        'interimContainers': [],
        'loadingOptions': {},
        'interimOptions': {},
        'predefinedModels': [],
        'pointedContainers': [],
        'skuCargoes': []
    }

    # Send the request, authenticating with the access_token; the body is the data to calculate
    r = requests.post('https://openapi.zhuangxiang.com/OptimizeLoadingTask',
                      data=json.dumps({'taskData': taskdata}),
                      headers={
                          'Authorization': 'bearer ' + access_token,
                          'content-type': 'application/json'
                      })
    print(r.text)
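For real use, a slightly more defensive variant of the token request is worth a few extra lines. This is a sketch, not part of the original example: the endpoint, field names, and tenant cookie are carried over from the code above, and only standard requests features (timeout, raise_for_status, json) are added.

import requests

def get_access_token(client_id, client_secret, username, password, tenant_id):
    resp = requests.post(
        'https://api.zhuangxiang.com/connect/token',
        data={
            'client_id': client_id,
            'client_secret': client_secret,
            'grant_type': 'password',
            'username': username,
            'password': password,
        },
        cookies={'Abp.TenantId': tenant_id},
        timeout=30,                        # avoid hanging forever on a dead endpoint
    )
    resp.raise_for_status()                # surface 4xx/5xx instead of a KeyError below
    return resp.json()['access_token']     # requests parses the JSON body for us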
import RPi.GPIO as GPIO


class Pin:
    def __init__(self, pin_id):
        self.pin_id = pin_id
        # The original referenced an undefined name `Off`; GPIO.LOW is assumed here
        self.state = GPIO.LOW
        GPIO.setmode(GPIO.BCM)  # address pins by Broadcom channel number

    def sense(self):
        """Configure the pin as an input and read its current level."""
        GPIO.setup(self.pin_id, GPIO.IN)
        output = GPIO.input(self.pin_id)
        self.state = output
        return output

    def set(self, state):
        """Configure the pin as an output and drive it to the given level."""
        GPIO.setup(self.pin_id, GPIO.OUT)
        GPIO.output(self.pin_id, state)
        self.state = state

    def get(self):
        """Return the last level read or written."""
        return self.state

    @classmethod
    def cleanup(cls):
        GPIO.cleanup()
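A minimal usage sketch of the Pin class (assumes a Raspberry Pi with RPi.GPIO available; BCM channels 18 and 23 are arbitrary examples):

led = Pin(18)            # BCM channel 18, hypothetical LED
led.set(GPIO.HIGH)       # drive the pin high
print(led.get())         # last state written: GPIO.HIGH

button = Pin(23)         # BCM channel 23, hypothetical button
print(button.sense())    # read the current input level

Pin.cleanup()            # release all GPIO channels when done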
[ "collection_data[\"wishlist_comment\"] if \"players\" in collection_data: self.previous_players = list(set(collection_data[\"players\"])) self.expansions = expansions self.accessories =", "count if total_votes > 0: suggested_age = round(sum / total_votes, 2) return suggested_age", "self.weightRating = float(game_data[\"weight\"]) self.year = game_data[\"year\"] self.playing_time = self.calc_playing_time(game_data) self.rank = self.calc_rank(game_data) self.other_ranks", "game_data[\"usersrated\"]: return 0 return Decimal(game_data[\"usersrated\"]) def calc_numowned(self, game_data): if not game_data[\"numowned\"]: return 0", "game_titles.append(game) game_titles.append(game.split(\"–\")[0].strip()) # Medium Title game_titles.append(game.split(\":\")[0].strip()) # Short Title game_titles.append(game.split(\"(\")[0].strip()) # No Edition", "game_titles: game_titles.append(\"Viticulture\") game_titles.extend(game_data[\"alternate_names\"]) #game_titles.extend([ game[\"name\"] for game in game_data[\"reimplements\"]]) #game_titles.extend([ game[\"name\"] for game", "or -1))] def calc_suggested_age(self, game_data): sum = 0 total_votes = 0 suggested_age =", "expansion in expansions: for expansion_num, support in expansion.players: if expansion_num not in [num", "self.minage = game_data[\"min_age\"] self.suggested_age = self.calc_suggested_age(game_data) self.numplays = collection_data[\"numplays\"] self.image = collection_data[\"image_version\"] or", "game_data[\"designers\"] self.publishers = game_data[\"publishers\"] self.reimplements = list(filter(lambda g: g[\"inbound\"], game_data[\"reimplements\"])) self.reimplementedby = list(filter(lambda", "num_players.append((str(supported_num), \"supported\")) # Add number of players from expansions for expansion in expansions:", "= ['A', 'An', 'The'] class BoardGame: def __init__(self, game_data, collection_data, expansions=[], accessories=[]): self.id", "El Grande Big Box if any(\"Big Box\" in title for title in game_titles):", "game_titles.append(\"Viticulture\") game_titles.extend(game_data[\"alternate_names\"]) #game_titles.extend([ game[\"name\"] for game in game_data[\"reimplements\"]]) #game_titles.extend([ game[\"name\"] for game in", "calc_suggested_age(self, game_data): sum = 0 total_votes = 0 suggested_age = 0 for player_age", "game_data[\"suggested_numplayers\"].copy() for supported_num in range(game_data[\"min_players\"], game_data[\"max_players\"] + 1): if supported_num > 0 and", "on every expansion\"\"\" game = game_data[\"name\"] game_titles = [] game_titles.append(collection_data[\"name\"]) game_titles.append(game) game_titles.append(game.split(\"–\")[0].strip()) #", "= sorted(num_players, key=lambda x: int(x[0].replace(\"+\", \"\"))) # Remove \"+ player counts if they", "\"\", rank[\"friendlyname\"]) return other_ranks def gen_name_list(self, game_data, collection_data): \"\"\"rules for cleaning up linked", "game_titles): game_titles.insert(0, \"King of Tokyo/New York\") game_titles.insert(0, \"King of Tokyo/King of New York\")", "game_titles: game_titles.append(\"Power Grid\") elif \"Queendomino\" in game_titles: game_titles.append(\"Kingdomino\") elif \"Rivals for Catan\" in", "in title for title in game_titles): game_tmp = re.sub(r\"\\s*\\(?Big Box.*\", \"\", game, flags=re.IGNORECASE)", "def calc_suggested_age(self, game_data): sum = 0 total_votes = 0 suggested_age = 0 for", "= game_data[\"integrates\"] self.players = self.calc_num_players(game_data, expansions) self.weight = self.calc_weight(game_data) self.weightRating = 
float(game_data[\"weight\"]) self.year", "repeated on every expansion\"\"\" game = game_data[\"name\"] game_titles = [] game_titles.append(collection_data[\"name\"]) game_titles.append(game) game_titles.append(game.split(\"–\")[0].strip())", "game_data[\"integrates\"] self.players = self.calc_num_players(game_data, expansions) self.weight = self.calc_weight(game_data) self.weightRating = float(game_data[\"weight\"]) self.year =", "self.publishers = game_data[\"publishers\"] self.reimplements = list(filter(lambda g: g[\"inbound\"], game_data[\"reimplements\"])) self.reimplementedby = list(filter(lambda g:", "def calc_playing_time(self, game_data): playing_time_mapping = { 30: '< 30min', 60: '30min - 1h',", "self.contained = game_data[\"contained\"] self.families = game_data[\"families\"] self.artists = game_data[\"artists\"] self.designers = game_data[\"designers\"] self.publishers", "items to remove duplicate data, such as the title being repeated on every", "self.calc_playing_time(game_data) self.rank = self.calc_rank(game_data) self.other_ranks = self.filter_other_ranks(game_data) self.usersrated = self.calc_usersrated(game_data) self.numowned = self.calc_numowned(game_data)", "\"Light\", 2: \"Light Medium\", 3: \"Medium\", 4: \"Medium Heavy\", 5: \"Heavy\", } return", "(self.__class__ == other.__class__ and self.id == other.id) def calc_num_players(self, game_data, expansions): num_players =", "= list(filter(lambda g: not g[\"inbound\"], game_data[\"reimplements\"])) self.integrates = game_data[\"integrates\"] self.players = self.calc_num_players(game_data, expansions)", "game_titles.append(game.split(\"(\")[0].strip()) # No Edition # Carcassonne Big Box 5, Alien Frontiers Big Box,", "for num, _ in num_players]: num_players.append((str(supported_num), \"supported\")) # Add number of players from", "and int(player[0]) < 14 ] return num_players def calc_playing_time(self, game_data): playing_time_mapping = {", "self.version_name = collection_data[\"version_name\"] self.version_year = collection_data[\"version_year\"] self.collection_id = collection_data[\"collection_id\"] def __hash__(self): return hash(self.id)", "BGG Rank, since it's already handled elsewhere other_ranks = list(filter(lambda g: g[\"id\"] !=", "flags=re.IGNORECASE) game_titles.append(game_tmp) if \"Chronicles of Crime\" in game_titles: game_titles.insert(0, \"The Millennium Series\") game_titles.insert(0,", "def __init__(self, game_data, collection_data, expansions=[], accessories=[]): self.id = game_data[\"id\"] name = collection_data[\"name\"] if", "= self.calc_usersrated(game_data) self.numowned = self.calc_numowned(game_data) self.average = self.calc_average(game_data) self.rating = self.calc_rating(game_data) self.minage =", "= accessories self.lastmodified = datetime.strptime(collection_data[\"last_modified\"], '%Y-%m-%d %H:%M:%S') self.version_name = collection_data[\"version_name\"] self.version_year = collection_data[\"version_year\"]", "title[0] self.name = name self.description = html.unescape(game_data[\"description\"]) self.categories = game_data[\"categories\"] self.mechanics = game_data[\"mechanics\"]", "accessories self.lastmodified = datetime.strptime(collection_data[\"last_modified\"], '%Y-%m-%d %H:%M:%S') self.version_name = collection_data[\"version_name\"] self.version_year = collection_data[\"version_year\"] self.collection_id", "elif \"Viticulture Essential Edition\" in game_titles: game_titles.append(\"Viticulture\") game_titles.extend(game_data[\"alternate_names\"]) #game_titles.extend([ 
game[\"name\"] for game in", "'30min - 1h', 120: '1-2h', 180: '2-3h', 240: '3-4h', } for playing_time_max, playing_time", "\"Not Ranked\", game_data[\"other_ranks\"])) for i, rank in enumerate(other_ranks): other_ranks[i][\"friendlyname\"] = re.sub(\" Rank\", \"\",", "collection_data[\"version_name\"] self.version_year = collection_data[\"version_year\"] self.collection_id = collection_data[\"collection_id\"] def __hash__(self): return hash(self.id) def __eq__(self,", "return (self.__class__ == other.__class__ and self.id == other.id) def calc_num_players(self, game_data, expansions): num_players", "for expansion in expansions: for expansion_num, support in expansion.players: if expansion_num not in", "[num for num, _ in num_players]: #TODO another expansion may upgrade this player", "in game_titles): game_tmp = re.sub(r\"\\s*\\(?Big Box.*\", \"\", game, flags=re.IGNORECASE) game_titles.append(game_tmp) if \"Chronicles of", "Catan\") game_titles.append(\"Die Fürsten von Catan\") game_titles.append(\"Catan: Das Duell\") elif \"Rococo\" in game_titles: game_titles.append(\"Rokoko\")", "game_data[\"contained\"] self.families = game_data[\"families\"] self.artists = game_data[\"artists\"] self.designers = game_data[\"designers\"] self.publishers = game_data[\"publishers\"]", "other): return (self.__class__ == other.__class__ and self.id == other.id) def calc_num_players(self, game_data, expansions):", "game_data[\"max_players\"] + 1): if supported_num > 0 and str(supported_num) not in [num for", "def filter_other_ranks(self, game_data): # Remove the BGG Rank, since it's already handled elsewhere", "\"The Millennium Series\") game_titles.insert(0, \"Chronicles of Crime: The Millennium Series\") elif any(title in", "if supported_num > 0 and str(supported_num) not in [num for num, _ in", "York\") game_titles.insert(0, \"King of Tokyo/King of New York\") elif \"Legends of Andor\" in", "in game_titles: game_titles.insert(0, \"Unforgiven: The Lincoln Assassination Trial\") elif \"Viticulture Essential Edition\" in", "Decimal(game_data[\"numowned\"]) def calc_rating(self, game_data): if not game_data[\"rating\"]: return None return Decimal(game_data[\"rating\"]) def calc_average(self,", "not game_data[\"average\"]: return None return Decimal(game_data[\"average\"]) def calc_weight(self, game_data): weight_mapping = { -1:", "def calc_weight(self, game_data): weight_mapping = { -1: \"Unknown\", 0: \"Light\", 1: \"Light\", 2:", "\"King of New York\") for title in game_titles): game_titles.insert(0, \"King of Tokyo/New York\")", "\"Legends of Andor\" in game_titles: game_titles.append(\"Die Legenden von Andor\") elif \"No Thanks!\" in", "= re.sub(r\"\\s*\\(?Big Box.*\", \"\", game, flags=re.IGNORECASE) game_titles.append(game_tmp) if \"Chronicles of Crime\" in game_titles:", "30min', 60: '30min - 1h', 120: '1-2h', 180: '2-3h', 240: '3-4h', } for", "self.artists = game_data[\"artists\"] self.designers = game_data[\"designers\"] self.publishers = game_data[\"publishers\"] self.reimplements = list(filter(lambda g:", "player[0][-1] != \"+\" and int(player[0]) < 14 ] return num_players def calc_playing_time(self, game_data):", "Ranked\": return None return Decimal(game_data[\"rank\"]) def calc_usersrated(self, game_data): if not game_data[\"usersrated\"]: return 0", "None return Decimal(game_data[\"average\"]) def calc_weight(self, game_data): weight_mapping = { -1: \"Unknown\", 0: \"Light\",", "Remove the BGG Rank, since it's already handled elsewhere other_ranks = list(filter(lambda g:", "supported if 
support == \"supported\": num_players.append((expansion_num, \"exp_supported\")) else: num_players.append((expansion_num, \"expansion\")) num_players = sorted(num_players,", "1): if supported_num > 0 and str(supported_num) not in [num for num, _", "Big Box, El Grande Big Box if any(\"Big Box\" in title for title", "support == \"supported\": num_players.append((expansion_num, \"exp_supported\")) else: num_players.append((expansion_num, \"expansion\")) num_players = sorted(num_players, key=lambda x:", "= collection_data[\"tags\"] self.comment = collection_data[\"comment\"] self.wishlist_comment = collection_data[\"wishlist_comment\"] if \"players\" in collection_data: self.previous_players", "return '> 4h' def calc_rank(self, game_data): if not game_data[\"rank\"] or game_data[\"rank\"] == \"Not", "Frontiers Big Box, El Grande Big Box if any(\"Big Box\" in title for", "Rivals for Catan\") game_titles.append(\"Die Fürsten von Catan\") game_titles.append(\"Catan: Das Duell\") elif \"Rococo\" in", "= self.calc_num_players(game_data, expansions) self.weight = self.calc_weight(game_data) self.weightRating = float(game_data[\"weight\"]) self.year = game_data[\"year\"] self.playing_time", "rank in enumerate(other_ranks): other_ranks[i][\"friendlyname\"] = re.sub(\" Rank\", \"\", rank[\"friendlyname\"]) return other_ranks def gen_name_list(self,", "Thanks!\" in game_titles: game_titles.append(\"Schöne Sch#!?e\") elif \"Power Grid Deluxe\" in game_titles: game_titles.append(\"Power Grid\")", "= game_data[\"artists\"] self.designers = game_data[\"designers\"] self.publishers = game_data[\"publishers\"] self.reimplements = list(filter(lambda g: g[\"inbound\"],", "Millennium Series\") elif any(title in (\"King of Tokyo\", \"King of New York\") for", "[num for num, _ in num_players]: num_players.append((str(supported_num), \"supported\")) # Add number of players", "self.id == other.id) def calc_num_players(self, game_data, expansions): num_players = game_data[\"suggested_numplayers\"].copy() for supported_num in", "import re articles = ['A', 'An', 'The'] class BoardGame: def __init__(self, game_data, collection_data,", "\"Light\", 1: \"Light\", 2: \"Light Medium\", 3: \"Medium\", 4: \"Medium Heavy\", 5: \"Heavy\",", "game_titles.append(game.split(\"–\")[0].strip()) # Medium Title game_titles.append(game.split(\":\")[0].strip()) # Short Title game_titles.append(game.split(\"(\")[0].strip()) # No Edition #", "Title game_titles.append(game.split(\":\")[0].strip()) # Short Title game_titles.append(game.split(\"(\")[0].strip()) # No Edition # Carcassonne Big Box", "game_data, collection_data): \"\"\"rules for cleaning up linked items to remove duplicate data, such", "suggested_age def filter_other_ranks(self, game_data): # Remove the BGG Rank, since it's already handled", "not game_data[\"playing_time\"]: return 'Unknown' if playing_time_max > int(game_data[\"playing_time\"]): return playing_time return '> 4h'", "in (\"King of Tokyo\", \"King of New York\") for title in game_titles): game_titles.insert(0,", "#game_titles.extend([ game[\"name\"] for game in game_data[\"reimplements\"]]) #game_titles.extend([ game[\"name\"] for game in game_data[\"reimplementedby\"]]) #game_titles.extend([", "game_titles.append(game.split(\":\")[0].strip()) # Short Title game_titles.append(game.split(\"(\")[0].strip()) # No Edition # Carcassonne Big Box 5,", "the supported if support == \"supported\": num_players.append((expansion_num, \"exp_supported\")) else: num_players.append((expansion_num, \"expansion\")) num_players =", 
"self.tags = collection_data[\"tags\"] self.comment = collection_data[\"comment\"] self.wishlist_comment = collection_data[\"wishlist_comment\"] if \"players\" in collection_data:", "accessories=[]): self.id = game_data[\"id\"] name = collection_data[\"name\"] if len(name) == 0: name =", "= [ player for player in num_players[:-1] if player[0][-1] != \"+\" and int(player[0])", "of Tokyo/New York\") game_titles.insert(0, \"King of Tokyo/King of New York\") elif \"Legends of", "> 0 and str(supported_num) not in [num for num, _ in num_players]: num_players.append((str(supported_num),", "game_titles.append(\"Kingdomino\") elif \"Rivals for Catan\" in game_titles: game_titles.append(\"The Rivals for Catan\") game_titles.append(\"Die Fürsten", "} for playing_time_max, playing_time in playing_time_mapping.items(): if not game_data[\"playing_time\"]: return 'Unknown' if playing_time_max", "last in the list num_players[:-1] = [ player for player in num_players[:-1] if", "game_titles: game_titles.append(\"Kingdomino\") elif \"Rivals for Catan\" in game_titles: game_titles.append(\"The Rivals for Catan\") game_titles.append(\"Die", "playing_time_mapping.items(): if not game_data[\"playing_time\"]: return 'Unknown' if playing_time_max > int(game_data[\"playing_time\"]): return playing_time return", "self.average = self.calc_average(game_data) self.rating = self.calc_rating(game_data) self.minage = game_data[\"min_age\"] self.suggested_age = self.calc_suggested_age(game_data) self.numplays", "= 0 for player_age in game_data[\"suggested_playerages\"]: count = player_age[\"numvotes\"] sum += int(player_age[\"age\"]) *", "game_data[\"reimplements\"])) self.reimplementedby = list(filter(lambda g: not g[\"inbound\"], game_data[\"reimplements\"])) self.integrates = game_data[\"integrates\"] self.players =", "= collection_data[\"version_name\"] self.version_year = collection_data[\"version_year\"] self.collection_id = collection_data[\"collection_id\"] def __hash__(self): return hash(self.id) def", "game_titles: game_titles.insert(0, \"The Millennium Series\") game_titles.insert(0, \"Chronicles of Crime: The Millennium Series\") elif", "datetime.strptime(collection_data[\"last_modified\"], '%Y-%m-%d %H:%M:%S') self.version_name = collection_data[\"version_name\"] self.version_year = collection_data[\"version_year\"] self.collection_id = collection_data[\"collection_id\"] def", "html import re articles = ['A', 'An', 'The'] class BoardGame: def __init__(self, game_data,", "= 0 total_votes = 0 suggested_age = 0 for player_age in game_data[\"suggested_playerages\"]: count", "g: g[\"inbound\"], game_data[\"reimplements\"])) self.reimplementedby = list(filter(lambda g: not g[\"inbound\"], game_data[\"reimplements\"])) self.integrates = game_data[\"integrates\"]", "not in [num for num, _ in num_players]: num_players.append((str(supported_num), \"supported\")) # Add number", "calc_numowned(self, game_data): if not game_data[\"numowned\"]: return 0 return Decimal(game_data[\"numowned\"]) def calc_rating(self, game_data): if", "== \"supported\": num_players.append((expansion_num, \"exp_supported\")) else: num_players.append((expansion_num, \"expansion\")) num_players = sorted(num_players, key=lambda x: int(x[0].replace(\"+\",", "another expansion may upgrade this player count to remove the supported if support", "import Decimal from datetime import datetime import html import re articles = ['A',", "decimal import Decimal from datetime import datetime import html import re articles =", "def __hash__(self): return 

    def __hash__(self):
        return hash(self.id)

    def __eq__(self, other):
        return (self.__class__ == other.__class__ and self.id == other.id)
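
    # Note on identity: hashing and equality use the BGG id alone, so two
    # instances built from different collection snapshots of the same game
    # compare equal and collapse to a single entry in sets and dict keys.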

    def calc_num_players(self, game_data, expansions):
        num_players = game_data["suggested_numplayers"].copy()
        for supported_num in range(game_data["min_players"], game_data["max_players"] + 1):
            if supported_num > 0 and str(supported_num) not in [num for num, _ in num_players]:
                num_players.append((str(supported_num), "supported"))
        # Add number of players from expansions
        for expansion in expansions:
            for expansion_num, support in expansion.players:
                if expansion_num not in [num for num, _ in num_players]:
                    # TODO: another expansion may upgrade this player count to remove the supported
                    if support == "supported":
                        num_players.append((expansion_num, "exp_supported"))
                    else:
                        num_players.append((expansion_num, "expansion"))
        num_players = sorted(num_players, key=lambda x: int(x[0].replace("+", "")))
        # Drop "N+" counts and counts of 14 or more, unless they are the last entry
        num_players[:-1] = [
            player for player in num_players[:-1]
            if player[0][-1] != "+" and int(player[0]) < 14
        ]
        return num_players

    def calc_playing_time(self, game_data):
        playing_time_mapping = {
            30: '< 30min',
            60: '30min - 1h',
            120: '1-2h',
            180: '2-3h',
            240: '3-4h',
        }
        if not game_data["playing_time"]:
            return 'Unknown'
        for playing_time_max, playing_time in playing_time_mapping.items():
            if playing_time_max > int(game_data["playing_time"]):
                return playing_time
        return '> 4h'
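
    # Worked example for calc_playing_time: a playing_time of "45" fails the
    # 30-minute bucket and matches the 60-minute one, returning '30min - 1h'.
    # The bucketing relies on dicts preserving insertion order (Python 3.7+).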
"\"players\" in collection_data: self.previous_players = list(set(collection_data[\"players\"])) self.expansions = expansions self.accessories = accessories self.lastmodified", "' '.join(title[1:]) + \", \" + title[0] self.name = name self.description = html.unescape(game_data[\"description\"])", "players from expansions for expansion in expansions: for expansion_num, support in expansion.players: if", "Short Title game_titles.append(game.split(\"(\")[0].strip()) # No Edition # Carcassonne Big Box 5, Alien Frontiers", "self.numplays = collection_data[\"numplays\"] self.image = collection_data[\"image_version\"] or collection_data[\"image\"] or game_data[\"image\"] self.tags = collection_data[\"tags\"]", "Decimal(game_data[\"average\"]) def calc_weight(self, game_data): weight_mapping = { -1: \"Unknown\", 0: \"Light\", 1: \"Light\",", "Box\" in title for title in game_titles): game_tmp = re.sub(r\"\\s*\\(?Big Box.*\", \"\", game,", "return 0 return Decimal(game_data[\"numowned\"]) def calc_rating(self, game_data): if not game_data[\"rating\"]: return None return", "playing_time_max > int(game_data[\"playing_time\"]): return playing_time return '> 4h' def calc_rank(self, game_data): if not", "collection_data[\"numplays\"] self.image = collection_data[\"image_version\"] or collection_data[\"image\"] or game_data[\"image\"] self.tags = collection_data[\"tags\"] self.comment =", "remove the supported if support == \"supported\": num_players.append((expansion_num, \"exp_supported\")) else: num_players.append((expansion_num, \"expansion\")) num_players", "game_data, collection_data, expansions=[], accessories=[]): self.id = game_data[\"id\"] name = collection_data[\"name\"] if len(name) ==", "Tokyo/King of New York\") elif \"Legends of Andor\" in game_titles: game_titles.append(\"Die Legenden von", "if total_votes > 0: suggested_age = round(sum / total_votes, 2) return suggested_age def", "None return Decimal(game_data[\"rank\"]) def calc_usersrated(self, game_data): if not game_data[\"usersrated\"]: return 0 return Decimal(game_data[\"usersrated\"])", "\"Not Ranked\": return None return Decimal(game_data[\"rank\"]) def calc_usersrated(self, game_data): if not game_data[\"usersrated\"]: return", "of Crime: The Millennium Series\") elif any(title in (\"King of Tokyo\", \"King of", "cleaning up linked items to remove duplicate data, such as the title being", "World\") elif \"Unforgiven\" in game_titles: game_titles.insert(0, \"Unforgiven: The Lincoln Assassination Trial\") elif \"Viticulture", "240: '3-4h', } for playing_time_max, playing_time in playing_time_mapping.items(): if not game_data[\"playing_time\"]: return 'Unknown'", "round(sum / total_votes, 2) return suggested_age def filter_other_ranks(self, game_data): # Remove the BGG", "\"+\" and int(player[0]) < 14 ] return num_players def calc_playing_time(self, game_data): playing_time_mapping =", "game_data[\"year\"] self.playing_time = self.calc_playing_time(game_data) self.rank = self.calc_rank(game_data) self.other_ranks = self.filter_other_ranks(game_data) self.usersrated = self.calc_usersrated(game_data)", "Big Box 5, Alien Frontiers Big Box, El Grande Big Box if any(\"Big", "in game_data[\"reimplements\"]]) #game_titles.extend([ game[\"name\"] for game in game_data[\"reimplementedby\"]]) #game_titles.extend([ game[\"name\"] for game in", "\"No Thanks!\" in game_titles: game_titles.append(\"Schöne Sch#!?e\") elif \"Power Grid Deluxe\" in game_titles: game_titles.append(\"Power", "def calc_num_players(self, game_data, expansions): 
num_players = game_data[\"suggested_numplayers\"].copy() for supported_num in range(game_data[\"min_players\"], game_data[\"max_players\"] +", "game_titles.append(\"The Rivals for Catan\") game_titles.append(\"Die Fürsten von Catan\") game_titles.append(\"Catan: Das Duell\") elif \"Rococo\"", "Add number of players from expansions for expansion in expansions: for expansion_num, support", "of Crime\" in game_titles: game_titles.insert(0, \"The Millennium Series\") game_titles.insert(0, \"Chronicles of Crime: The", "for game in game_data[\"reimplements\"]]) #game_titles.extend([ game[\"name\"] for game in game_data[\"reimplementedby\"]]) #game_titles.extend([ game[\"name\"] for", "in playing_time_mapping.items(): if not game_data[\"playing_time\"]: return 'Unknown' if playing_time_max > int(game_data[\"playing_time\"]): return playing_time", "from expansions for expansion in expansions: for expansion_num, support in expansion.players: if expansion_num", "num_players]: #TODO another expansion may upgrade this player count to remove the supported", "self.calc_suggested_age(game_data) self.numplays = collection_data[\"numplays\"] self.image = collection_data[\"image_version\"] or collection_data[\"image\"] or game_data[\"image\"] self.tags =", "game_titles: game_titles.append(\"Die Legenden von Andor\") elif \"No Thanks!\" in game_titles: game_titles.append(\"Schöne Sch#!?e\") elif", "of players from expansions for expansion in expansions: for expansion_num, support in expansion.players:", "def calc_usersrated(self, game_data): if not game_data[\"usersrated\"]: return 0 return Decimal(game_data[\"usersrated\"]) def calc_numowned(self, game_data):", "self.reimplements = list(filter(lambda g: g[\"inbound\"], game_data[\"reimplements\"])) self.reimplementedby = list(filter(lambda g: not g[\"inbound\"], game_data[\"reimplements\"]))", "num_players = sorted(num_players, key=lambda x: int(x[0].replace(\"+\", \"\"))) # Remove \"+ player counts if", "New York\") for title in game_titles): game_titles.insert(0, \"King of Tokyo/New York\") game_titles.insert(0, \"King", "gen_name_list(self, game_data, collection_data): \"\"\"rules for cleaning up linked items to remove duplicate data,", "enumerate(other_ranks): other_ranks[i][\"friendlyname\"] = re.sub(\" Rank\", \"\", rank[\"friendlyname\"]) return other_ranks def gen_name_list(self, game_data, collection_data):", "= game_data[\"name\"] game_titles = [] game_titles.append(collection_data[\"name\"]) game_titles.append(game) game_titles.append(game.split(\"–\")[0].strip()) # Medium Title game_titles.append(game.split(\":\")[0].strip()) #", "elif \"Unforgiven\" in game_titles: game_titles.insert(0, \"Unforgiven: The Lincoln Assassination Trial\") elif \"Viticulture Essential", "No Edition # Carcassonne Big Box 5, Alien Frontiers Big Box, El Grande", "The Millennium Series\") elif any(title in (\"King of Tokyo\", \"King of New York\")", "the last in the list num_players[:-1] = [ player for player in num_players[:-1]", "num, _ in num_players]: num_players.append((str(supported_num), \"supported\")) # Add number of players from expansions", "key=lambda x: int(x[0].replace(\"+\", \"\"))) # Remove \"+ player counts if they are not", "game = game_data[\"name\"] game_titles = [] game_titles.append(collection_data[\"name\"]) game_titles.append(game) game_titles.append(game.split(\"–\")[0].strip()) # Medium Title game_titles.append(game.split(\":\")[0].strip())", "num_players.append((expansion_num, \"expansion\")) num_players = sorted(num_players, key=lambda x: 
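
    # The getters above differ on missing data: calc_rank, calc_rating and
    # calc_average fall back to None, while calc_usersrated and calc_numowned
    # fall back to 0, so consumers sorting or formatting these fields must
    # tolerate None.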

    def calc_weight(self, game_data):
        weight_mapping = {
            -1: "Unknown",
            0: "Light",
            1: "Light",
            2: "Light Medium",
            3: "Medium",
            4: "Medium Heavy",
            5: "Heavy",
        }
        return weight_mapping[round(Decimal(game_data["weight"] or -1))]

    def calc_suggested_age(self, game_data):
        age_sum = 0  # renamed from `sum` to avoid shadowing the builtin
        total_votes = 0
        suggested_age = 0
        for player_age in game_data["suggested_playerages"]:
            count = player_age["numvotes"]
            age_sum += int(player_age["age"]) * count
            total_votes += count
        if total_votes > 0:
            suggested_age = round(age_sum / total_votes, 2)
        return suggested_age
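
    # Worked example for calc_suggested_age: votes of age 8 (10 votes) and
    # age 12 (5 votes) give round((8*10 + 12*5) / 15, 2) == 9.33. This assumes
    # "numvotes" is already numeric; a string value would raise a TypeError on
    # the first accumulation.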

    def filter_other_ranks(self, game_data):
        # Remove the BGG Rank, since it's already handled elsewhere
        other_ranks = list(filter(lambda g: g["id"] != "1" and g["value"] != "Not Ranked", game_data["other_ranks"]))
        for i, rank in enumerate(other_ranks):
            other_ranks[i]["friendlyname"] = re.sub(" Rank", "", rank["friendlyname"])
        return other_ranks
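
    # Example: a rank entry whose friendlyname is "Strategy Game Rank" comes
    # back as "Strategy Game". The rename mutates the rank dicts in place, so
    # the entries inside game_data["other_ranks"] are updated as well.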
\"The Millennium Series\") game_titles.insert(0, \"Chronicles of Crime: The Millennium", "name self.description = html.unescape(game_data[\"description\"]) self.categories = game_data[\"categories\"] self.mechanics = game_data[\"mechanics\"] self.contained = game_data[\"contained\"]", "York\") for title in game_titles): game_titles.insert(0, \"King of Tokyo/New York\") game_titles.insert(0, \"King of", "= list(set(collection_data[\"players\"])) self.expansions = expansions self.accessories = accessories self.lastmodified = datetime.strptime(collection_data[\"last_modified\"], '%Y-%m-%d %H:%M:%S')", "if not game_data[\"numowned\"]: return 0 return Decimal(game_data[\"numowned\"]) def calc_rating(self, game_data): if not game_data[\"rating\"]:", "of New York\") for title in game_titles): game_titles.insert(0, \"King of Tokyo/New York\") game_titles.insert(0,", "= { 30: '< 30min', 60: '30min - 1h', 120: '1-2h', 180: '2-3h',", "\"\"\"rules for cleaning up linked items to remove duplicate data, such as the", "game_titles.insert(0, \"King of Tokyo/King of New York\") elif \"Legends of Andor\" in game_titles:", "self.categories = game_data[\"categories\"] self.mechanics = game_data[\"mechanics\"] self.contained = game_data[\"contained\"] self.families = game_data[\"families\"] self.artists", "\"exp_supported\")) else: num_players.append((expansion_num, \"expansion\")) num_players = sorted(num_players, key=lambda x: int(x[0].replace(\"+\", \"\"))) # Remove", "game_data[\"artists\"] self.designers = game_data[\"designers\"] self.publishers = game_data[\"publishers\"] self.reimplements = list(filter(lambda g: g[\"inbound\"], game_data[\"reimplements\"]))", "for title in game_titles): game_titles.insert(0, \"King of Tokyo/New York\") game_titles.insert(0, \"King of Tokyo/King", "game_data[\"numowned\"]: return 0 return Decimal(game_data[\"numowned\"]) def calc_rating(self, game_data): if not game_data[\"rating\"]: return None", "for supported_num in range(game_data[\"min_players\"], game_data[\"max_players\"] + 1): if supported_num > 0 and str(supported_num)", "g[\"value\"] != \"Not Ranked\", game_data[\"other_ranks\"])) for i, rank in enumerate(other_ranks): other_ranks[i][\"friendlyname\"] = re.sub(\"", "Decimal from datetime import datetime import html import re articles = ['A', 'An',", "= re.sub(\" Rank\", \"\", rank[\"friendlyname\"]) return other_ranks def gen_name_list(self, game_data, collection_data): \"\"\"rules for", "re.sub(r\"\\s*\\(?Big Box.*\", \"\", game, flags=re.IGNORECASE) game_titles.append(game_tmp) if \"Chronicles of Crime\" in game_titles: game_titles.insert(0,", "game_data): playing_time_mapping = { 30: '< 30min', 60: '30min - 1h', 120: '1-2h',", "__init__(self, game_data, collection_data, expansions=[], accessories=[]): self.id = game_data[\"id\"] name = collection_data[\"name\"] if len(name)", "self.previous_players = list(set(collection_data[\"players\"])) self.expansions = expansions self.accessories = accessories self.lastmodified = datetime.strptime(collection_data[\"last_modified\"], '%Y-%m-%d", "\"Viticulture Essential Edition\" in game_titles: game_titles.append(\"Viticulture\") game_titles.extend(game_data[\"alternate_names\"]) #game_titles.extend([ game[\"name\"] for game in game_data[\"reimplements\"]])", "self.id = game_data[\"id\"] name = collection_data[\"name\"] if len(name) == 0: name = game_data[\"name\"]", "game_tmp = re.sub(r\"\\s*\\(?Big Box.*\", \"\", game, flags=re.IGNORECASE) game_titles.append(game_tmp) if \"Chronicles of Crime\" in", "articles: name = 
' '.join(title[1:]) + \", \" + title[0] self.name = name", "in the list num_players[:-1] = [ player for player in num_players[:-1] if player[0][-1]", "game_data): if not game_data[\"numowned\"]: return 0 return Decimal(game_data[\"numowned\"]) def calc_rating(self, game_data): if not", "g[\"inbound\"], game_data[\"reimplements\"])) self.integrates = game_data[\"integrates\"] self.players = self.calc_num_players(game_data, expansions) self.weight = self.calc_weight(game_data) self.weightRating", "game_titles.insert(0, \"The Millennium Series\") game_titles.insert(0, \"Chronicles of Crime: The Millennium Series\") elif any(title", "self.other_ranks = self.filter_other_ranks(game_data) self.usersrated = self.calc_usersrated(game_data) self.numowned = self.calc_numowned(game_data) self.average = self.calc_average(game_data) self.rating", "not game_data[\"numowned\"]: return 0 return Decimal(game_data[\"numowned\"]) def calc_rating(self, game_data): if not game_data[\"rating\"]: return", "game_titles): game_tmp = re.sub(r\"\\s*\\(?Big Box.*\", \"\", game, flags=re.IGNORECASE) game_titles.append(game_tmp) if \"Chronicles of Crime\"", "elif \"No Thanks!\" in game_titles: game_titles.append(\"Schöne Sch#!?e\") elif \"Power Grid Deluxe\" in game_titles:", "elif \"Small World Underground\" in game_titles: game_titles.append(\"Small World\") elif \"Unforgiven\" in game_titles: game_titles.insert(0,", "every expansion\"\"\" game = game_data[\"name\"] game_titles = [] game_titles.append(collection_data[\"name\"]) game_titles.append(game) game_titles.append(game.split(\"–\")[0].strip()) # Medium", "return weight_mapping[round(Decimal(game_data[\"weight\"] or -1))] def calc_suggested_age(self, game_data): sum = 0 total_votes = 0", "return Decimal(game_data[\"rating\"]) def calc_average(self, game_data): if not game_data[\"average\"]: return None return Decimal(game_data[\"average\"]) def", "since it's already handled elsewhere other_ranks = list(filter(lambda g: g[\"id\"] != \"1\" and", "in game_titles: game_titles.append(\"Power Grid\") elif \"Queendomino\" in game_titles: game_titles.append(\"Kingdomino\") elif \"Rivals for Catan\"", "upgrade this player count to remove the supported if support == \"supported\": num_players.append((expansion_num,", "\"Chronicles of Crime: The Millennium Series\") elif any(title in (\"King of Tokyo\", \"King", "\"\", game, flags=re.IGNORECASE) game_titles.append(game_tmp) if \"Chronicles of Crime\" in game_titles: game_titles.insert(0, \"The Millennium", "game_data[\"id\"] name = collection_data[\"name\"] if len(name) == 0: name = game_data[\"name\"] alt_names =", "for player in num_players[:-1] if player[0][-1] != \"+\" and int(player[0]) < 14 ]", "self.reimplementedby = list(filter(lambda g: not g[\"inbound\"], game_data[\"reimplements\"])) self.integrates = game_data[\"integrates\"] self.players = self.calc_num_players(game_data,", "180: '2-3h', 240: '3-4h', } for playing_time_max, playing_time in playing_time_mapping.items(): if not game_data[\"playing_time\"]:", "= game_data[\"name\"] alt_names = self.gen_name_list(game_data, collection_data) self.alternate_names = list(dict.fromkeys(alt_names)) # De-dupe the list,", "expansion\"\"\" game = game_data[\"name\"] game_titles = [] game_titles.append(collection_data[\"name\"]) game_titles.append(game) game_titles.append(game.split(\"–\")[0].strip()) # Medium Title", "are not the last in the list num_players[:-1] = [ player for player", "Deluxe\" in game_titles: game_titles.append(\"Power Grid\") elif \"Queendomino\" in 
game_titles: game_titles.append(\"Kingdomino\") elif \"Rivals for", "self.version_year = collection_data[\"version_year\"] self.collection_id = collection_data[\"collection_id\"] def __hash__(self): return hash(self.id) def __eq__(self, other):", "list(set(collection_data[\"players\"])) self.expansions = expansions self.accessories = accessories self.lastmodified = datetime.strptime(collection_data[\"last_modified\"], '%Y-%m-%d %H:%M:%S') self.version_name", "!= \"Not Ranked\", game_data[\"other_ranks\"])) for i, rank in enumerate(other_ranks): other_ranks[i][\"friendlyname\"] = re.sub(\" Rank\",", "[ player for player in num_players[:-1] if player[0][-1] != \"+\" and int(player[0]) <", "other.id) def calc_num_players(self, game_data, expansions): num_players = game_data[\"suggested_numplayers\"].copy() for supported_num in range(game_data[\"min_players\"], game_data[\"max_players\"]", "range(game_data[\"min_players\"], game_data[\"max_players\"] + 1): if supported_num > 0 and str(supported_num) not in [num", "suggested_age = 0 for player_age in game_data[\"suggested_playerages\"]: count = player_age[\"numvotes\"] sum += int(player_age[\"age\"])", "Fürsten von Catan\") game_titles.append(\"Catan: Das Duell\") elif \"Rococo\" in game_titles: game_titles.append(\"Rokoko\") elif \"Small", "self.wishlist_comment = collection_data[\"wishlist_comment\"] if \"players\" in collection_data: self.previous_players = list(set(collection_data[\"players\"])) self.expansions = expansions", "articles = ['A', 'An', 'The'] class BoardGame: def __init__(self, game_data, collection_data, expansions=[], accessories=[]):", "= float(game_data[\"weight\"]) self.year = game_data[\"year\"] self.playing_time = self.calc_playing_time(game_data) self.rank = self.calc_rank(game_data) self.other_ranks =", "the BGG Rank, since it's already handled elsewhere other_ranks = list(filter(lambda g: g[\"id\"]", "self.filter_other_ranks(game_data) self.usersrated = self.calc_usersrated(game_data) self.numowned = self.calc_numowned(game_data) self.average = self.calc_average(game_data) self.rating = self.calc_rating(game_data)", "BoardGame: def __init__(self, game_data, collection_data, expansions=[], accessories=[]): self.id = game_data[\"id\"] name = collection_data[\"name\"]", "num_players[:-1] = [ player for player in num_players[:-1] if player[0][-1] != \"+\" and", "Grande Big Box if any(\"Big Box\" in title for title in game_titles): game_tmp", "5: \"Heavy\", } return weight_mapping[round(Decimal(game_data[\"weight\"] or -1))] def calc_suggested_age(self, game_data): sum = 0", "filter_other_ranks(self, game_data): # Remove the BGG Rank, since it's already handled elsewhere other_ranks", "= collection_data[\"version_year\"] self.collection_id = collection_data[\"collection_id\"] def __hash__(self): return hash(self.id) def __eq__(self, other): return", "def calc_numowned(self, game_data): if not game_data[\"numowned\"]: return 0 return Decimal(game_data[\"numowned\"]) def calc_rating(self, game_data):", "Remove \"+ player counts if they are not the last in the list", "Rank\", \"\", rank[\"friendlyname\"]) return other_ranks def gen_name_list(self, game_data, collection_data): \"\"\"rules for cleaning up", "self.calc_weight(game_data) self.weightRating = float(game_data[\"weight\"]) self.year = game_data[\"year\"] self.playing_time = self.calc_playing_time(game_data) self.rank = self.calc_rank(game_data)", "title for title in game_titles): game_tmp = re.sub(r\"\\s*\\(?Big Box.*\", \"\", game, flags=re.IGNORECASE) 
game_titles.append(game_tmp)", "this player count to remove the supported if support == \"supported\": num_players.append((expansion_num, \"exp_supported\"))", "# Short Title game_titles.append(game.split(\"(\")[0].strip()) # No Edition # Carcassonne Big Box 5, Alien", "g[\"inbound\"], game_data[\"reimplements\"])) self.reimplementedby = list(filter(lambda g: not g[\"inbound\"], game_data[\"reimplements\"])) self.integrates = game_data[\"integrates\"] self.players", "-1: \"Unknown\", 0: \"Light\", 1: \"Light\", 2: \"Light Medium\", 3: \"Medium\", 4: \"Medium", "expansions: for expansion_num, support in expansion.players: if expansion_num not in [num for num,", "in [num for num, _ in num_players]: num_players.append((str(supported_num), \"supported\")) # Add number of", "game_titles.insert(0, \"Unforgiven: The Lincoln Assassination Trial\") elif \"Viticulture Essential Edition\" in game_titles: game_titles.append(\"Viticulture\")", "'< 30min', 60: '30min - 1h', 120: '1-2h', 180: '2-3h', 240: '3-4h', }", "Andor\" in game_titles: game_titles.append(\"Die Legenden von Andor\") elif \"No Thanks!\" in game_titles: game_titles.append(\"Schöne", "\"Small World Underground\" in game_titles: game_titles.append(\"Small World\") elif \"Unforgiven\" in game_titles: game_titles.insert(0, \"Unforgiven:", "supported_num > 0 and str(supported_num) not in [num for num, _ in num_players]:", "if not game_data[\"average\"]: return None return Decimal(game_data[\"average\"]) def calc_weight(self, game_data): weight_mapping = {", "self.numowned = self.calc_numowned(game_data) self.average = self.calc_average(game_data) self.rating = self.calc_rating(game_data) self.minage = game_data[\"min_age\"] self.suggested_age", "playing_time_mapping = { 30: '< 30min', 60: '30min - 1h', 120: '1-2h', 180:", "'%Y-%m-%d %H:%M:%S') self.version_name = collection_data[\"version_name\"] self.version_year = collection_data[\"version_year\"] self.collection_id = collection_data[\"collection_id\"] def __hash__(self):", "in game_titles: game_titles.append(\"Small World\") elif \"Unforgiven\" in game_titles: game_titles.insert(0, \"Unforgiven: The Lincoln Assassination", "expansion.players: if expansion_num not in [num for num, _ in num_players]: #TODO another", "Box, El Grande Big Box if any(\"Big Box\" in title for title in", "player count to remove the supported if support == \"supported\": num_players.append((expansion_num, \"exp_supported\")) else:", "else: num_players.append((expansion_num, \"expansion\")) num_players = sorted(num_players, key=lambda x: int(x[0].replace(\"+\", \"\"))) # Remove \"+", "in game_titles: game_titles.append(\"Die Legenden von Andor\") elif \"No Thanks!\" in game_titles: game_titles.append(\"Schöne Sch#!?e\")", "Decimal(game_data[\"usersrated\"]) def calc_numowned(self, game_data): if not game_data[\"numowned\"]: return 0 return Decimal(game_data[\"numowned\"]) def calc_rating(self,", "= game_data[\"mechanics\"] self.contained = game_data[\"contained\"] self.families = game_data[\"families\"] self.artists = game_data[\"artists\"] self.designers =", "= collection_data[\"collection_id\"] def __hash__(self): return hash(self.id) def __eq__(self, other): return (self.__class__ == other.__class__", "def calc_average(self, game_data): if not game_data[\"average\"]: return None return Decimal(game_data[\"average\"]) def calc_weight(self, game_data):", "in collection_data: self.previous_players = list(set(collection_data[\"players\"])) self.expansions = expansions self.accessories = accessories 
self.lastmodified =", "\"Rivals for Catan\" in game_titles: game_titles.append(\"The Rivals for Catan\") game_titles.append(\"Die Fürsten von Catan\")", "= name.split() if title[0] in articles: name = ' '.join(title[1:]) + \", \"", "game_titles: game_titles.append(\"Schöne Sch#!?e\") elif \"Power Grid Deluxe\" in game_titles: game_titles.append(\"Power Grid\") elif \"Queendomino\"", "Duell\") elif \"Rococo\" in game_titles: game_titles.append(\"Rokoko\") elif \"Small World Underground\" in game_titles: game_titles.append(\"Small", "game_titles: game_titles.append(\"Rokoko\") elif \"Small World Underground\" in game_titles: game_titles.append(\"Small World\") elif \"Unforgiven\" in", "von Catan\") game_titles.append(\"Catan: Das Duell\") elif \"Rococo\" in game_titles: game_titles.append(\"Rokoko\") elif \"Small World", "player counts if they are not the last in the list num_players[:-1] =", "game, flags=re.IGNORECASE) game_titles.append(game_tmp) if \"Chronicles of Crime\" in game_titles: game_titles.insert(0, \"The Millennium Series\")", "re articles = ['A', 'An', 'The'] class BoardGame: def __init__(self, game_data, collection_data, expansions=[],", "_ in num_players]: #TODO another expansion may upgrade this player count to remove", "they are not the last in the list num_players[:-1] = [ player for", "\"Medium\", 4: \"Medium Heavy\", 5: \"Heavy\", } return weight_mapping[round(Decimal(game_data[\"weight\"] or -1))] def calc_suggested_age(self,", "Das Duell\") elif \"Rococo\" in game_titles: game_titles.append(\"Rokoko\") elif \"Small World Underground\" in game_titles:", "of Tokyo/King of New York\") elif \"Legends of Andor\" in game_titles: game_titles.append(\"Die Legenden", "self.alternate_names = list(dict.fromkeys(alt_names)) # De-dupe the list, keeping order title = name.split() if", "game_data): if not game_data[\"usersrated\"]: return 0 return Decimal(game_data[\"usersrated\"]) def calc_numowned(self, game_data): if not", "calc_weight(self, game_data): weight_mapping = { -1: \"Unknown\", 0: \"Light\", 1: \"Light\", 2: \"Light", "if support == \"supported\": num_players.append((expansion_num, \"exp_supported\")) else: num_players.append((expansion_num, \"expansion\")) num_players = sorted(num_players, key=lambda", "player for player in num_players[:-1] if player[0][-1] != \"+\" and int(player[0]) < 14", "calc_rating(self, game_data): if not game_data[\"rating\"]: return None return Decimal(game_data[\"rating\"]) def calc_average(self, game_data): if", "# Remove \"+ player counts if they are not the last in the", "0: suggested_age = round(sum / total_votes, 2) return suggested_age def filter_other_ranks(self, game_data): #", "for player_age in game_data[\"suggested_playerages\"]: count = player_age[\"numvotes\"] sum += int(player_age[\"age\"]) * count total_votes", "count = player_age[\"numvotes\"] sum += int(player_age[\"age\"]) * count total_votes += count if total_votes", "expansions=[], accessories=[]): self.id = game_data[\"id\"] name = collection_data[\"name\"] if len(name) == 0: name", "= game_data[\"categories\"] self.mechanics = game_data[\"mechanics\"] self.contained = game_data[\"contained\"] self.families = game_data[\"families\"] self.artists =", "return suggested_age def filter_other_ranks(self, game_data): # Remove the BGG Rank, since it's already", "int(x[0].replace(\"+\", \"\"))) # Remove \"+ player counts if they are not the last", "player_age[\"numvotes\"] sum += int(player_age[\"age\"]) * count total_votes += count if total_votes > 0:", "if \"players\" in 
collection_data: self.previous_players = list(set(collection_data[\"players\"])) self.expansions = expansions self.accessories = accessories", "calc_average(self, game_data): if not game_data[\"average\"]: return None return Decimal(game_data[\"average\"]) def calc_weight(self, game_data): weight_mapping", "and g[\"value\"] != \"Not Ranked\", game_data[\"other_ranks\"])) for i, rank in enumerate(other_ranks): other_ranks[i][\"friendlyname\"] =", "num_players def calc_playing_time(self, game_data): playing_time_mapping = { 30: '< 30min', 60: '30min -", "already handled elsewhere other_ranks = list(filter(lambda g: g[\"id\"] != \"1\" and g[\"value\"] !=", "game_titles: game_titles.append(\"The Rivals for Catan\") game_titles.append(\"Die Fürsten von Catan\") game_titles.append(\"Catan: Das Duell\") elif", "#TODO another expansion may upgrade this player count to remove the supported if", "game_data[\"rank\"] == \"Not Ranked\": return None return Decimal(game_data[\"rank\"]) def calc_usersrated(self, game_data): if not", "Big Box if any(\"Big Box\" in title for title in game_titles): game_tmp =", "Tokyo\", \"King of New York\") for title in game_titles): game_titles.insert(0, \"King of Tokyo/New", "expansions for expansion in expansions: for expansion_num, support in expansion.players: if expansion_num not", "def __eq__(self, other): return (self.__class__ == other.__class__ and self.id == other.id) def calc_num_players(self,", "game_data[\"reimplements\"]]) #game_titles.extend([ game[\"name\"] for game in game_data[\"reimplementedby\"]]) #game_titles.extend([ game[\"name\"] for game in game_data[\"integrates\"]])", "class BoardGame: def __init__(self, game_data, collection_data, expansions=[], accessories=[]): self.id = game_data[\"id\"] name =", "for cleaning up linked items to remove duplicate data, such as the title", "from decimal import Decimal from datetime import datetime import html import re articles", "'2-3h', 240: '3-4h', } for playing_time_max, playing_time in playing_time_mapping.items(): if not game_data[\"playing_time\"]: return", "Catan\") game_titles.append(\"Catan: Das Duell\") elif \"Rococo\" in game_titles: game_titles.append(\"Rokoko\") elif \"Small World Underground\"", "collection_data) self.alternate_names = list(dict.fromkeys(alt_names)) # De-dupe the list, keeping order title = name.split()", "in articles: name = ' '.join(title[1:]) + \", \" + title[0] self.name =", "in num_players]: num_players.append((str(supported_num), \"supported\")) # Add number of players from expansions for expansion", "order title = name.split() if title[0] in articles: name = ' '.join(title[1:]) +", "import html import re articles = ['A', 'An', 'The'] class BoardGame: def __init__(self,", "if any(\"Big Box\" in title for title in game_titles): game_tmp = re.sub(r\"\\s*\\(?Big Box.*\",", "= self.gen_name_list(game_data, collection_data) self.alternate_names = list(dict.fromkeys(alt_names)) # De-dupe the list, keeping order title", "game[\"name\"] for game in game_data[\"reimplements\"]]) #game_titles.extend([ game[\"name\"] for game in game_data[\"reimplementedby\"]]) #game_titles.extend([ game[\"name\"]", "self.rating = self.calc_rating(game_data) self.minage = game_data[\"min_age\"] self.suggested_age = self.calc_suggested_age(game_data) self.numplays = collection_data[\"numplays\"] self.image", "# Carcassonne Big Box 5, Alien Frontiers Big Box, El Grande Big Box", "= list(filter(lambda g: g[\"id\"] != \"1\" and g[\"value\"] != \"Not Ranked\", game_data[\"other_ranks\"])) for", "3: 
\"Medium\", 4: \"Medium Heavy\", 5: \"Heavy\", } return weight_mapping[round(Decimal(game_data[\"weight\"] or -1))] def", "in num_players[:-1] if player[0][-1] != \"+\" and int(player[0]) < 14 ] return num_players", "for playing_time_max, playing_time in playing_time_mapping.items(): if not game_data[\"playing_time\"]: return 'Unknown' if playing_time_max >", "not the last in the list num_players[:-1] = [ player for player in", "self.gen_name_list(game_data, collection_data) self.alternate_names = list(dict.fromkeys(alt_names)) # De-dupe the list, keeping order title =", "to remove duplicate data, such as the title being repeated on every expansion\"\"\"", "> 0: suggested_age = round(sum / total_votes, 2) return suggested_age def filter_other_ranks(self, game_data):", "= list(filter(lambda g: g[\"inbound\"], game_data[\"reimplements\"])) self.reimplementedby = list(filter(lambda g: not g[\"inbound\"], game_data[\"reimplements\"])) self.integrates", "not game_data[\"usersrated\"]: return 0 return Decimal(game_data[\"usersrated\"]) def calc_numowned(self, game_data): if not game_data[\"numowned\"]: return", "Series\") game_titles.insert(0, \"Chronicles of Crime: The Millennium Series\") elif any(title in (\"King of", "of Andor\" in game_titles: game_titles.append(\"Die Legenden von Andor\") elif \"No Thanks!\" in game_titles:", "Decimal(game_data[\"rating\"]) def calc_average(self, game_data): if not game_data[\"average\"]: return None return Decimal(game_data[\"average\"]) def calc_weight(self,", "!= \"+\" and int(player[0]) < 14 ] return num_players def calc_playing_time(self, game_data): playing_time_mapping", "= 0 suggested_age = 0 for player_age in game_data[\"suggested_playerages\"]: count = player_age[\"numvotes\"] sum", "sorted(num_players, key=lambda x: int(x[0].replace(\"+\", \"\"))) # Remove \"+ player counts if they are", "in expansions: for expansion_num, support in expansion.players: if expansion_num not in [num for", "= expansions self.accessories = accessories self.lastmodified = datetime.strptime(collection_data[\"last_modified\"], '%Y-%m-%d %H:%M:%S') self.version_name = collection_data[\"version_name\"]", "num_players[:-1] if player[0][-1] != \"+\" and int(player[0]) < 14 ] return num_players def", "Alien Frontiers Big Box, El Grande Big Box if any(\"Big Box\" in title", "the list, keeping order title = name.split() if title[0] in articles: name =", "self.families = game_data[\"families\"] self.artists = game_data[\"artists\"] self.designers = game_data[\"designers\"] self.publishers = game_data[\"publishers\"] self.reimplements", "Rank, since it's already handled elsewhere other_ranks = list(filter(lambda g: g[\"id\"] != \"1\"", "keeping order title = name.split() if title[0] in articles: name = ' '.join(title[1:])", "\"King of Tokyo/New York\") game_titles.insert(0, \"King of Tokyo/King of New York\") elif \"Legends", "\"Unforgiven: The Lincoln Assassination Trial\") elif \"Viticulture Essential Edition\" in game_titles: game_titles.append(\"Viticulture\") game_titles.extend(game_data[\"alternate_names\"])", "return playing_time return '> 4h' def calc_rank(self, game_data): if not game_data[\"rank\"] or game_data[\"rank\"]", "game_data[\"name\"] game_titles = [] game_titles.append(collection_data[\"name\"]) game_titles.append(game) game_titles.append(game.split(\"–\")[0].strip()) # Medium Title game_titles.append(game.split(\":\")[0].strip()) # Short", "game_titles.append(\"Rokoko\") elif \"Small World Underground\" in game_titles: game_titles.append(\"Small World\") 
elif \"Unforgiven\" in game_titles:", "float(game_data[\"weight\"]) self.year = game_data[\"year\"] self.playing_time = self.calc_playing_time(game_data) self.rank = self.calc_rank(game_data) self.other_ranks = self.filter_other_ranks(game_data)", "Essential Edition\" in game_titles: game_titles.append(\"Viticulture\") game_titles.extend(game_data[\"alternate_names\"]) #game_titles.extend([ game[\"name\"] for game in game_data[\"reimplements\"]]) #game_titles.extend([", "not game_data[\"rank\"] or game_data[\"rank\"] == \"Not Ranked\": return None return Decimal(game_data[\"rank\"]) def calc_usersrated(self,", "= game_data[\"publishers\"] self.reimplements = list(filter(lambda g: g[\"inbound\"], game_data[\"reimplements\"])) self.reimplementedby = list(filter(lambda g: not", "(\"King of Tokyo\", \"King of New York\") for title in game_titles): game_titles.insert(0, \"King", "or collection_data[\"image\"] or game_data[\"image\"] self.tags = collection_data[\"tags\"] self.comment = collection_data[\"comment\"] self.wishlist_comment = collection_data[\"wishlist_comment\"]", "self.accessories = accessories self.lastmodified = datetime.strptime(collection_data[\"last_modified\"], '%Y-%m-%d %H:%M:%S') self.version_name = collection_data[\"version_name\"] self.version_year =", "\"Unknown\", 0: \"Light\", 1: \"Light\", 2: \"Light Medium\", 3: \"Medium\", 4: \"Medium Heavy\",", "counts if they are not the last in the list num_players[:-1] = [", "0: name = game_data[\"name\"] alt_names = self.gen_name_list(game_data, collection_data) self.alternate_names = list(dict.fromkeys(alt_names)) # De-dupe", "= game_data[\"contained\"] self.families = game_data[\"families\"] self.artists = game_data[\"artists\"] self.designers = game_data[\"designers\"] self.publishers =", "game_data): if not game_data[\"average\"]: return None return Decimal(game_data[\"average\"]) def calc_weight(self, game_data): weight_mapping =", "title[0] in articles: name = ' '.join(title[1:]) + \", \" + title[0] self.name", "Trial\") elif \"Viticulture Essential Edition\" in game_titles: game_titles.append(\"Viticulture\") game_titles.extend(game_data[\"alternate_names\"]) #game_titles.extend([ game[\"name\"] for game", "Carcassonne Big Box 5, Alien Frontiers Big Box, El Grande Big Box if", "Medium\", 3: \"Medium\", 4: \"Medium Heavy\", 5: \"Heavy\", } return weight_mapping[round(Decimal(game_data[\"weight\"] or -1))]", "self.designers = game_data[\"designers\"] self.publishers = game_data[\"publishers\"] self.reimplements = list(filter(lambda g: g[\"inbound\"], game_data[\"reimplements\"])) self.reimplementedby", "self.calc_rank(game_data) self.other_ranks = self.filter_other_ranks(game_data) self.usersrated = self.calc_usersrated(game_data) self.numowned = self.calc_numowned(game_data) self.average = self.calc_average(game_data)", "game_data[\"families\"] self.artists = game_data[\"artists\"] self.designers = game_data[\"designers\"] self.publishers = game_data[\"publishers\"] self.reimplements = list(filter(lambda", "self.year = game_data[\"year\"] self.playing_time = self.calc_playing_time(game_data) self.rank = self.calc_rank(game_data) self.other_ranks = self.filter_other_ranks(game_data) self.usersrated", "game in game_data[\"reimplements\"]]) #game_titles.extend([ game[\"name\"] for game in game_data[\"reimplementedby\"]]) #game_titles.extend([ game[\"name\"] for game", "game_data): sum = 0 total_votes = 0 suggested_age = 0 for player_age in", "other.__class__ and self.id == other.id) def calc_num_players(self, game_data, 
expansions): num_players = game_data[\"suggested_numplayers\"].copy() for", "Underground\" in game_titles: game_titles.append(\"Small World\") elif \"Unforgiven\" in game_titles: game_titles.insert(0, \"Unforgiven: The Lincoln", "} return weight_mapping[round(Decimal(game_data[\"weight\"] or -1))] def calc_suggested_age(self, game_data): sum = 0 total_votes =", "\" + title[0] self.name = name self.description = html.unescape(game_data[\"description\"]) self.categories = game_data[\"categories\"] self.mechanics", "if not game_data[\"rank\"] or game_data[\"rank\"] == \"Not Ranked\": return None return Decimal(game_data[\"rank\"]) def", "for i, rank in enumerate(other_ranks): other_ranks[i][\"friendlyname\"] = re.sub(\" Rank\", \"\", rank[\"friendlyname\"]) return other_ranks", "in game_data[\"suggested_playerages\"]: count = player_age[\"numvotes\"] sum += int(player_age[\"age\"]) * count total_votes += count", "self.calc_usersrated(game_data) self.numowned = self.calc_numowned(game_data) self.average = self.calc_average(game_data) self.rating = self.calc_rating(game_data) self.minage = game_data[\"min_age\"]", "game_titles.insert(0, \"King of Tokyo/New York\") game_titles.insert(0, \"King of Tokyo/King of New York\") elif", "remove duplicate data, such as the title being repeated on every expansion\"\"\" game", "!= \"1\" and g[\"value\"] != \"Not Ranked\", game_data[\"other_ranks\"])) for i, rank in enumerate(other_ranks):", "list, keeping order title = name.split() if title[0] in articles: name = '", "any(title in (\"King of Tokyo\", \"King of New York\") for title in game_titles):", "game_data): if not game_data[\"rank\"] or game_data[\"rank\"] == \"Not Ranked\": return None return Decimal(game_data[\"rank\"])", "> int(game_data[\"playing_time\"]): return playing_time return '> 4h' def calc_rank(self, game_data): if not game_data[\"rank\"]", "= collection_data[\"name\"] if len(name) == 0: name = game_data[\"name\"] alt_names = self.gen_name_list(game_data, collection_data)", "\"Rococo\" in game_titles: game_titles.append(\"Rokoko\") elif \"Small World Underground\" in game_titles: game_titles.append(\"Small World\") elif", "self.collection_id = collection_data[\"collection_id\"] def __hash__(self): return hash(self.id) def __eq__(self, other): return (self.__class__ ==", "# Add number of players from expansions for expansion in expansions: for expansion_num,", "name.split() if title[0] in articles: name = ' '.join(title[1:]) + \", \" +", "self.image = collection_data[\"image_version\"] or collection_data[\"image\"] or game_data[\"image\"] self.tags = collection_data[\"tags\"] self.comment = collection_data[\"comment\"]", "for num, _ in num_players]: #TODO another expansion may upgrade this player count", "= html.unescape(game_data[\"description\"]) self.categories = game_data[\"categories\"] self.mechanics = game_data[\"mechanics\"] self.contained = game_data[\"contained\"] self.families =", "\"expansion\")) num_players = sorted(num_players, key=lambda x: int(x[0].replace(\"+\", \"\"))) # Remove \"+ player counts", "'1-2h', 180: '2-3h', 240: '3-4h', } for playing_time_max, playing_time in playing_time_mapping.items(): if not", "not in [num for num, _ in num_players]: #TODO another expansion may upgrade", "other_ranks[i][\"friendlyname\"] = re.sub(\" Rank\", \"\", rank[\"friendlyname\"]) return other_ranks def gen_name_list(self, game_data, collection_data): \"\"\"rules", "/ total_votes, 2) return suggested_age def filter_other_ranks(self, game_data): # Remove the BGG Rank,", 
"game_titles.insert(0, \"Chronicles of Crime: The Millennium Series\") elif any(title in (\"King of Tokyo\",", "title in game_titles): game_titles.insert(0, \"King of Tokyo/New York\") game_titles.insert(0, \"King of Tokyo/King of", "game_titles.append(\"Die Legenden von Andor\") elif \"No Thanks!\" in game_titles: game_titles.append(\"Schöne Sch#!?e\") elif \"Power", "collection_data[\"tags\"] self.comment = collection_data[\"comment\"] self.wishlist_comment = collection_data[\"wishlist_comment\"] if \"players\" in collection_data: self.previous_players =", "game_titles.append(\"Power Grid\") elif \"Queendomino\" in game_titles: game_titles.append(\"Kingdomino\") elif \"Rivals for Catan\" in game_titles:", "int(player[0]) < 14 ] return num_players def calc_playing_time(self, game_data): playing_time_mapping = { 30:", "of Tokyo\", \"King of New York\") for title in game_titles): game_titles.insert(0, \"King of", "= collection_data[\"numplays\"] self.image = collection_data[\"image_version\"] or collection_data[\"image\"] or game_data[\"image\"] self.tags = collection_data[\"tags\"] self.comment", "De-dupe the list, keeping order title = name.split() if title[0] in articles: name", "in game_titles: game_titles.append(\"Rokoko\") elif \"Small World Underground\" in game_titles: game_titles.append(\"Small World\") elif \"Unforgiven\"", "elif \"Rococo\" in game_titles: game_titles.append(\"Rokoko\") elif \"Small World Underground\" in game_titles: game_titles.append(\"Small World\")", "\"Light Medium\", 3: \"Medium\", 4: \"Medium Heavy\", 5: \"Heavy\", } return weight_mapping[round(Decimal(game_data[\"weight\"] or", "= [] game_titles.append(collection_data[\"name\"]) game_titles.append(game) game_titles.append(game.split(\"–\")[0].strip()) # Medium Title game_titles.append(game.split(\":\")[0].strip()) # Short Title game_titles.append(game.split(\"(\")[0].strip())", "title = name.split() if title[0] in articles: name = ' '.join(title[1:]) + \",", "total_votes, 2) return suggested_age def filter_other_ranks(self, game_data): # Remove the BGG Rank, since", "any(\"Big Box\" in title for title in game_titles): game_tmp = re.sub(r\"\\s*\\(?Big Box.*\", \"\",", "game_titles: game_titles.insert(0, \"Unforgiven: The Lincoln Assassination Trial\") elif \"Viticulture Essential Edition\" in game_titles:", "= self.calc_playing_time(game_data) self.rank = self.calc_rank(game_data) self.other_ranks = self.filter_other_ranks(game_data) self.usersrated = self.calc_usersrated(game_data) self.numowned =", "alt_names = self.gen_name_list(game_data, collection_data) self.alternate_names = list(dict.fromkeys(alt_names)) # De-dupe the list, keeping order", "= self.calc_numowned(game_data) self.average = self.calc_average(game_data) self.rating = self.calc_rating(game_data) self.minage = game_data[\"min_age\"] self.suggested_age =", "if expansion_num not in [num for num, _ in num_players]: #TODO another expansion", "Box if any(\"Big Box\" in title for title in game_titles): game_tmp = re.sub(r\"\\s*\\(?Big", "collection_data: self.previous_players = list(set(collection_data[\"players\"])) self.expansions = expansions self.accessories = accessories self.lastmodified = datetime.strptime(collection_data[\"last_modified\"],", "self.weight = self.calc_weight(game_data) self.weightRating = float(game_data[\"weight\"]) self.year = game_data[\"year\"] self.playing_time = self.calc_playing_time(game_data) self.rank", "num_players = game_data[\"suggested_numplayers\"].copy() for supported_num in 
range(game_data[\"min_players\"], game_data[\"max_players\"] + 1): if supported_num >", "self.rank = self.calc_rank(game_data) self.other_ranks = self.filter_other_ranks(game_data) self.usersrated = self.calc_usersrated(game_data) self.numowned = self.calc_numowned(game_data) self.average", "in game_titles: game_titles.insert(0, \"The Millennium Series\") game_titles.insert(0, \"Chronicles of Crime: The Millennium Series\")", "support in expansion.players: if expansion_num not in [num for num, _ in num_players]:", "= { -1: \"Unknown\", 0: \"Light\", 1: \"Light\", 2: \"Light Medium\", 3: \"Medium\",", "and self.id == other.id) def calc_num_players(self, game_data, expansions): num_players = game_data[\"suggested_numplayers\"].copy() for supported_num", "['A', 'An', 'The'] class BoardGame: def __init__(self, game_data, collection_data, expansions=[], accessories=[]): self.id =", "self.lastmodified = datetime.strptime(collection_data[\"last_modified\"], '%Y-%m-%d %H:%M:%S') self.version_name = collection_data[\"version_name\"] self.version_year = collection_data[\"version_year\"] self.collection_id =", "= collection_data[\"wishlist_comment\"] if \"players\" in collection_data: self.previous_players = list(set(collection_data[\"players\"])) self.expansions = expansions self.accessories", "'> 4h' def calc_rank(self, game_data): if not game_data[\"rank\"] or game_data[\"rank\"] == \"Not Ranked\":", "handled elsewhere other_ranks = list(filter(lambda g: g[\"id\"] != \"1\" and g[\"value\"] != \"Not", "if not game_data[\"usersrated\"]: return 0 return Decimal(game_data[\"usersrated\"]) def calc_numowned(self, game_data): if not game_data[\"numowned\"]:", "rank[\"friendlyname\"]) return other_ranks def gen_name_list(self, game_data, collection_data): \"\"\"rules for cleaning up linked items", "collection_data[\"collection_id\"] def __hash__(self): return hash(self.id) def __eq__(self, other): return (self.__class__ == other.__class__ and", "0: \"Light\", 1: \"Light\", 2: \"Light Medium\", 3: \"Medium\", 4: \"Medium Heavy\", 5:", "<filename>scripts/mybgg/models.py from decimal import Decimal from datetime import datetime import html import re", "= self.calc_rating(game_data) self.minage = game_data[\"min_age\"] self.suggested_age = self.calc_suggested_age(game_data) self.numplays = collection_data[\"numplays\"] self.image =", "\"supported\")) # Add number of players from expansions for expansion in expansions: for", "Heavy\", 5: \"Heavy\", } return weight_mapping[round(Decimal(game_data[\"weight\"] or -1))] def calc_suggested_age(self, game_data): sum =", "title in game_titles): game_tmp = re.sub(r\"\\s*\\(?Big Box.*\", \"\", game, flags=re.IGNORECASE) game_titles.append(game_tmp) if \"Chronicles", "= self.calc_average(game_data) self.rating = self.calc_rating(game_data) self.minage = game_data[\"min_age\"] self.suggested_age = self.calc_suggested_age(game_data) self.numplays =", "self.calc_numowned(game_data) self.average = self.calc_average(game_data) self.rating = self.calc_rating(game_data) self.minage = game_data[\"min_age\"] self.suggested_age = self.calc_suggested_age(game_data)", "html.unescape(game_data[\"description\"]) self.categories = game_data[\"categories\"] self.mechanics = game_data[\"mechanics\"] self.contained = game_data[\"contained\"] self.families = game_data[\"families\"]", "return Decimal(game_data[\"rank\"]) def calc_usersrated(self, game_data): if not game_data[\"usersrated\"]: return 0 return Decimal(game_data[\"usersrated\"]) def", "Box 5, Alien Frontiers Big Box, El 
Grande Big Box if any(\"Big Box\"", "= self.calc_rank(game_data) self.other_ranks = self.filter_other_ranks(game_data) self.usersrated = self.calc_usersrated(game_data) self.numowned = self.calc_numowned(game_data) self.average =", "Assassination Trial\") elif \"Viticulture Essential Edition\" in game_titles: game_titles.append(\"Viticulture\") game_titles.extend(game_data[\"alternate_names\"]) #game_titles.extend([ game[\"name\"] for", "g: not g[\"inbound\"], game_data[\"reimplements\"])) self.integrates = game_data[\"integrates\"] self.players = self.calc_num_players(game_data, expansions) self.weight =", "to remove the supported if support == \"supported\": num_players.append((expansion_num, \"exp_supported\")) else: num_players.append((expansion_num, \"expansion\"))", "calc_playing_time(self, game_data): playing_time_mapping = { 30: '< 30min', 60: '30min - 1h', 120:", "game_titles.append(game_tmp) if \"Chronicles of Crime\" in game_titles: game_titles.insert(0, \"The Millennium Series\") game_titles.insert(0, \"Chronicles", "Legenden von Andor\") elif \"No Thanks!\" in game_titles: game_titles.append(\"Schöne Sch#!?e\") elif \"Power Grid", "Catan\" in game_titles: game_titles.append(\"The Rivals for Catan\") game_titles.append(\"Die Fürsten von Catan\") game_titles.append(\"Catan: Das", "game_data[\"playing_time\"]: return 'Unknown' if playing_time_max > int(game_data[\"playing_time\"]): return playing_time return '> 4h' def", "game_data): if not game_data[\"rating\"]: return None return Decimal(game_data[\"rating\"]) def calc_average(self, game_data): if not", "120: '1-2h', 180: '2-3h', 240: '3-4h', } for playing_time_max, playing_time in playing_time_mapping.items(): if", "= name self.description = html.unescape(game_data[\"description\"]) self.categories = game_data[\"categories\"] self.mechanics = game_data[\"mechanics\"] self.contained =", "g: g[\"id\"] != \"1\" and g[\"value\"] != \"Not Ranked\", game_data[\"other_ranks\"])) for i, rank", "von Andor\") elif \"No Thanks!\" in game_titles: game_titles.append(\"Schöne Sch#!?e\") elif \"Power Grid Deluxe\"", "Lincoln Assassination Trial\") elif \"Viticulture Essential Edition\" in game_titles: game_titles.append(\"Viticulture\") game_titles.extend(game_data[\"alternate_names\"]) #game_titles.extend([ game[\"name\"]", "{ 30: '< 30min', 60: '30min - 1h', 120: '1-2h', 180: '2-3h', 240:", "elif \"Power Grid Deluxe\" in game_titles: game_titles.append(\"Power Grid\") elif \"Queendomino\" in game_titles: game_titles.append(\"Kingdomino\")", "calc_usersrated(self, game_data): if not game_data[\"usersrated\"]: return 0 return Decimal(game_data[\"usersrated\"]) def calc_numowned(self, game_data): if", "in enumerate(other_ranks): other_ranks[i][\"friendlyname\"] = re.sub(\" Rank\", \"\", rank[\"friendlyname\"]) return other_ranks def gen_name_list(self, game_data,", "sum += int(player_age[\"age\"]) * count total_votes += count if total_votes > 0: suggested_age", "num_players]: num_players.append((str(supported_num), \"supported\")) # Add number of players from expansions for expansion in", "as the title being repeated on every expansion\"\"\" game = game_data[\"name\"] game_titles =", "\"Chronicles of Crime\" in game_titles: game_titles.insert(0, \"The Millennium Series\") game_titles.insert(0, \"Chronicles of Crime:", "game[\"name\"] for game in game_data[\"reimplementedby\"]]) #game_titles.extend([ game[\"name\"] for game in game_data[\"integrates\"]]) return game_titles", "list num_players[:-1] = [ player for player in num_players[:-1] if 
player[0][-1] != \"+\"", "it's already handled elsewhere other_ranks = list(filter(lambda g: g[\"id\"] != \"1\" and g[\"value\"]", "= game_data[\"suggested_numplayers\"].copy() for supported_num in range(game_data[\"min_players\"], game_data[\"max_players\"] + 1): if supported_num > 0", "elsewhere other_ranks = list(filter(lambda g: g[\"id\"] != \"1\" and g[\"value\"] != \"Not Ranked\",", "game_data[\"rank\"] or game_data[\"rank\"] == \"Not Ranked\": return None return Decimal(game_data[\"rank\"]) def calc_usersrated(self, game_data):", "if not game_data[\"rating\"]: return None return Decimal(game_data[\"rating\"]) def calc_average(self, game_data): if not game_data[\"average\"]:", "Medium Title game_titles.append(game.split(\":\")[0].strip()) # Short Title game_titles.append(game.split(\"(\")[0].strip()) # No Edition # Carcassonne Big", "return Decimal(game_data[\"numowned\"]) def calc_rating(self, game_data): if not game_data[\"rating\"]: return None return Decimal(game_data[\"rating\"]) def", "+ \", \" + title[0] self.name = name self.description = html.unescape(game_data[\"description\"]) self.categories =", "return None return Decimal(game_data[\"rating\"]) def calc_average(self, game_data): if not game_data[\"average\"]: return None return", "name = ' '.join(title[1:]) + \", \" + title[0] self.name = name self.description", "World Underground\" in game_titles: game_titles.append(\"Small World\") elif \"Unforgiven\" in game_titles: game_titles.insert(0, \"Unforgiven: The" ]
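
# Hedged aside (added example, not part of models.py): the
# list(dict.fromkeys(...)) idiom used for self.alternate_names above de-dupes
# a list while preserving first-seen order, unlike list(set(...)). The sample
# names below are made up.
names = ["Catan", "The Settlers of Catan", "Catan", "Die Siedler von Catan"]
assert list(dict.fromkeys(names)) == ["Catan", "The Settlers of Catan", "Die Siedler von Catan"]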
[ "# 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def judge_overlap(id, output_all): ids = [11, 12, 13, 14, 15, 16,", "多个文件中要用到的函数之类的统一写在这里 from skimage.measure import label import numpy as np import copy # 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留", "top = output.max() area_list = [] for i in range(1, top + 1):", "parameters.') total_trainable_params = sum(p.numel() for p in net.parameters() if p.requires_grad) print(f'{total_trainable_params:,} training parameters.')", "(output_id * output_other).sum(1) + 0.001 if (inter / output_id_area) >= 0.4: refine[:, :,", "top + 1): area = len(np.where(output == i)[0]) area_list.append(area) max_area = max(area_list) max_index", "def get_model_params(net): total_params = sum(p.numel() for p in net.parameters()) print(f'{total_params:,} total parameters.') total_trainable_params", "label(output) top = output.max() area_list = [] for i in range(1, top +", "2000: return refine else: refine[output == max_index + 1] = 1 if top", "(inter / output_id_area) >= 0.4: refine[:, :, index] = 0 if (inter /", "< 2000: return refine else: refine[output == max_index + 1] = 1 if", "inter = (output_id * output_other).sum(1) + 0.001 if (inter / output_id_area) >= 0.4:", "14, 15, 16, 17, 18, 21, 22, 23, 24, 25, 26, 27, 28,", "if len(np.where(output > 0)[0]) > 0: output = label(output) top = output.max() area_list", "output_id_area) >= 0.4: refine[:, :, index] = 0 if (inter / output_other_area) >=", "26, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 41, 42,", "= output_id.sum(1) + 0.001 refine = output_all if index <= 29: end =", "+ 1): area = len(np.where(output == i)[0]) area_list.append(area) max_area = max(area_list) max_index =", "refine[output == second_max_index + 1] = 1 return refine else: return refine else:", "= output_other.sum(1) + 0.001 inter = (output_id * output_other).sum(1) + 0.001 if (inter", "return refine else: return refine else: return refine else: return refine # 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果", "44, 45, 46, 47, 48] index = ids.index(id) output_id = output_all[:, :, index].reshape(1,", "index] = 0 if (inter / output_other_area) >= 0.4: refine[:, :, i] =", "in range(index + 1, end): # 每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other = output_all[:, :, i].reshape(1, -1)", "> 0)[0]) > 0: output = label(output) top = output.max() area_list = []", "= 0 if (inter / output_other_area) >= 0.4: refine[:, :, i] = 0", "refine # 输入一个模型,获得其参数量 def get_model_params(net): total_params = sum(p.numel() for p in net.parameters()) print(f'{total_params:,}", "= output_all[:, :, i].reshape(1, -1) output_other_area = output_other.sum(1) + 0.001 inter = (output_id", "0: output = label(output) top = output.max() area_list = [] for i in", "= output_all if index <= 29: end = index + 3 elif index", "ids = [11, 12, 13, 14, 15, 16, 17, 18, 21, 22, 23,", "else: end = index + 1 # 最后一颗牙不用再计算重叠率了 for i in range(index +", "len(np.where(output == i)[0]) area_list.append(area) max_area = max(area_list) max_index = area_list.index(max_area) if max_area <", "43, 44, 45, 46, 47, 48] index = ids.index(id) output_id = output_all[:, :,", "30: # 倒数第二颗牙前面只有一颗牙 end = index + 2 else: end = index +", "index <= 29: end = index + 3 elif index == 30: #", "total_trainable_params = sum(p.numel() for p in net.parameters() if p.requires_grad) print(f'{total_trainable_params:,} training parameters.') print()", "output_id.sum(1) + 0.001 refine = output_all if index <= 29: end = index", "max_area < 2000: return refine else: refine[output == max_index 
+ 1] = 1", "np import copy # 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def refine_output(output): refine = np.zeros((1280, 2440), dtype=np.uint8) if", "import copy # 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def refine_output(output): refine = np.zeros((1280, 2440), dtype=np.uint8) if len(np.where(output", "output_all): ids = [11, 12, 13, 14, 15, 16, 17, 18, 21, 22,", "25, 26, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 41,", ":, index].reshape(1, -1) # 每一通道保存着一颗牙的分割结果 output_id_area = output_id.sum(1) + 0.001 refine = output_all", "net.parameters()) print(f'{total_params:,} total parameters.') total_trainable_params = sum(p.numel() for p in net.parameters() if p.requires_grad)", "32, 33, 34, 35, 36, 37, 38, 41, 42, 43, 44, 45, 46,", "end = index + 2 else: end = index + 1 # 最后一颗牙不用再计算重叠率了", "refine = np.zeros((1280, 2440), dtype=np.uint8) if len(np.where(output > 0)[0]) > 0: output =", "max_index + 1] = 1 if top > 1: temp_list = copy.deepcopy(area_list) del", "= 1 return refine else: return refine else: return refine else: return refine", "i in range(index + 1, end): # 每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other = output_all[:, :, i].reshape(1,", "i] = 0 return refine # 输入一个模型,获得其参数量 def get_model_params(net): total_params = sum(p.numel() for", "> 1: temp_list = copy.deepcopy(area_list) del temp_list[max_index] second_max_area = max(temp_list) second_max_index = area_list.index(second_max_area)", "index].reshape(1, -1) # 每一通道保存着一颗牙的分割结果 output_id_area = output_id.sum(1) + 0.001 refine = output_all if", "numpy as np import copy # 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def refine_output(output): refine = np.zeros((1280, 2440),", "= max(area_list) max_index = area_list.index(max_area) if max_area < 2000: return refine else: refine[output", "import numpy as np import copy # 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def refine_output(output): refine = np.zeros((1280,", "/ second_max_area) < 1.2: refine[output == second_max_index + 1] = 1 return refine", "return refine else: refine[output == max_index + 1] = 1 if top >", "1] = 1 return refine else: return refine else: return refine else: return", "24, 25, 26, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38,", "34, 35, 36, 37, 38, 41, 42, 43, 44, 45, 46, 47, 48]", "return refine # 输入一个模型,获得其参数量 def get_model_params(net): total_params = sum(p.numel() for p in net.parameters())", "for p in net.parameters()) print(f'{total_params:,} total parameters.') total_trainable_params = sum(p.numel() for p in", "refine[output == max_index + 1] = 1 if top > 1: temp_list =", "== 30: # 倒数第二颗牙前面只有一颗牙 end = index + 2 else: end = index", "dtype=np.uint8) if len(np.where(output > 0)[0]) > 0: output = label(output) top = output.max()", "16, 17, 18, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32,", "max_index = area_list.index(max_area) if max_area < 2000: return refine else: refine[output == max_index", "else: refine[output == max_index + 1] = 1 if top > 1: temp_list", "15, 16, 17, 18, 21, 22, 23, 24, 25, 26, 27, 28, 31,", "for i in range(index + 1, end): # 每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other = output_all[:, :,", "27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 41, 42, 43,", "# 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def refine_output(output): refine = np.zeros((1280, 2440), dtype=np.uint8) if len(np.where(output > 0)[0])", "+ 3 elif index == 30: # 倒数第二颗牙前面只有一颗牙 end = index + 2", "* output_other).sum(1) + 0.001 if (inter / output_id_area) >= 
0.4: refine[:, :, index]", "36, 37, 38, 41, 42, 43, 44, 45, 46, 47, 48] index =", "47, 48] index = ids.index(id) output_id = output_all[:, :, index].reshape(1, -1) # 每一通道保存着一颗牙的分割结果", "output_all if index <= 29: end = index + 3 elif index ==", "= [] for i in range(1, top + 1): area = len(np.where(output ==", "from skimage.measure import label import numpy as np import copy # 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def", "= sum(p.numel() for p in net.parameters()) print(f'{total_params:,} total parameters.') total_trainable_params = sum(p.numel() for", "refine_output(output): refine = np.zeros((1280, 2440), dtype=np.uint8) if len(np.where(output > 0)[0]) > 0: output", "1: temp_list = copy.deepcopy(area_list) del temp_list[max_index] second_max_area = max(temp_list) second_max_index = area_list.index(second_max_area) if", "output_all[:, :, index].reshape(1, -1) # 每一通道保存着一颗牙的分割结果 output_id_area = output_id.sum(1) + 0.001 refine =", "p in net.parameters()) print(f'{total_params:,} total parameters.') total_trainable_params = sum(p.numel() for p in net.parameters()", "# 多个文件中要用到的函数之类的统一写在这里 from skimage.measure import label import numpy as np import copy #", "second_max_index = area_list.index(second_max_area) if (max_area / second_max_area) < 1.2: refine[output == second_max_index +", "def judge_overlap(id, output_all): ids = [11, 12, 13, 14, 15, 16, 17, 18,", "as np import copy # 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def refine_output(output): refine = np.zeros((1280, 2440), dtype=np.uint8)", "42, 43, 44, 45, 46, 47, 48] index = ids.index(id) output_id = output_all[:,", "if (inter / output_id_area) >= 0.4: refine[:, :, index] = 0 if (inter", "输入一个模型,获得其参数量 def get_model_params(net): total_params = sum(p.numel() for p in net.parameters()) print(f'{total_params:,} total parameters.')", "1 return refine else: return refine else: return refine else: return refine #", "= index + 1 # 最后一颗牙不用再计算重叠率了 for i in range(index + 1, end):", "(inter / output_other_area) >= 0.4: refine[:, :, i] = 0 return refine #", "judge_overlap(id, output_all): ids = [11, 12, 13, 14, 15, 16, 17, 18, 21,", "i].reshape(1, -1) output_other_area = output_other.sum(1) + 0.001 inter = (output_id * output_other).sum(1) +", "-1) output_other_area = output_other.sum(1) + 0.001 inter = (output_id * output_other).sum(1) + 0.001", "index = ids.index(id) output_id = output_all[:, :, index].reshape(1, -1) # 每一通道保存着一颗牙的分割结果 output_id_area =", "index == 30: # 倒数第二颗牙前面只有一颗牙 end = index + 2 else: end =", ">= 0.4: refine[:, :, index] = 0 if (inter / output_other_area) >= 0.4:", "refine[:, :, i] = 0 return refine # 输入一个模型,获得其参数量 def get_model_params(net): total_params =", "total_params = sum(p.numel() for p in net.parameters()) print(f'{total_params:,} total parameters.') total_trainable_params = sum(p.numel()", "+ 1] = 1 if top > 1: temp_list = copy.deepcopy(area_list) del temp_list[max_index]", "second_max_area) < 1.2: refine[output == second_max_index + 1] = 1 return refine else:", "del temp_list[max_index] second_max_area = max(temp_list) second_max_index = area_list.index(second_max_area) if (max_area / second_max_area) <", "1): area = len(np.where(output == i)[0]) area_list.append(area) max_area = max(area_list) max_index = area_list.index(max_area)", "= area_list.index(second_max_area) if (max_area / second_max_area) < 1.2: refine[output == second_max_index + 1]", "23, 24, 25, 26, 27, 28, 31, 32, 33, 34, 35, 36, 37,", "max_area = max(area_list) max_index = 
area_list.index(max_area) if max_area < 2000: return refine else:", "output_other = output_all[:, :, i].reshape(1, -1) output_other_area = output_other.sum(1) + 0.001 inter =", "output_all[:, :, i].reshape(1, -1) output_other_area = output_other.sum(1) + 0.001 inter = (output_id *", "output_other_area = output_other.sum(1) + 0.001 inter = (output_id * output_other).sum(1) + 0.001 if", "0.001 if (inter / output_id_area) >= 0.4: refine[:, :, index] = 0 if", "output_other.sum(1) + 0.001 inter = (output_id * output_other).sum(1) + 0.001 if (inter /", "elif index == 30: # 倒数第二颗牙前面只有一颗牙 end = index + 2 else: end", "if (max_area / second_max_area) < 1.2: refine[output == second_max_index + 1] = 1", "i in range(1, top + 1): area = len(np.where(output == i)[0]) area_list.append(area) max_area", ">= 0.4: refine[:, :, i] = 0 return refine # 输入一个模型,获得其参数量 def get_model_params(net):", "refine else: return refine # 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def judge_overlap(id, output_all): ids = [11, 12,", "area = len(np.where(output == i)[0]) area_list.append(area) max_area = max(area_list) max_index = area_list.index(max_area) if", "def refine_output(output): refine = np.zeros((1280, 2440), dtype=np.uint8) if len(np.where(output > 0)[0]) > 0:", "copy.deepcopy(area_list) del temp_list[max_index] second_max_area = max(temp_list) second_max_index = area_list.index(second_max_area) if (max_area / second_max_area)", "return refine # 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def judge_overlap(id, output_all): ids = [11, 12, 13, 14,", "/ output_other_area) >= 0.4: refine[:, :, i] = 0 return refine # 输入一个模型,获得其参数量", "= output_all[:, :, index].reshape(1, -1) # 每一通道保存着一颗牙的分割结果 output_id_area = output_id.sum(1) + 0.001 refine", "== second_max_index + 1] = 1 return refine else: return refine else: return", "如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def refine_output(output): refine = np.zeros((1280, 2440), dtype=np.uint8) if len(np.where(output > 0)[0]) >", "22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 34, 35, 36,", "== i)[0]) area_list.append(area) max_area = max(area_list) max_index = area_list.index(max_area) if max_area < 2000:", "if index <= 29: end = index + 3 elif index == 30:", "# 每一通道保存着一颗牙的分割结果 output_id_area = output_id.sum(1) + 0.001 refine = output_all if index <=", "1, end): # 每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other = output_all[:, :, i].reshape(1, -1) output_other_area = output_other.sum(1)", "in range(1, top + 1): area = len(np.where(output == i)[0]) area_list.append(area) max_area =", "output_id_area = output_id.sum(1) + 0.001 refine = output_all if index <= 29: end", "= np.zeros((1280, 2440), dtype=np.uint8) if len(np.where(output > 0)[0]) > 0: output = label(output)", "< 1.2: refine[output == second_max_index + 1] = 1 return refine else: return", "ids.index(id) output_id = output_all[:, :, index].reshape(1, -1) # 每一通道保存着一颗牙的分割结果 output_id_area = output_id.sum(1) +", "end = index + 1 # 最后一颗牙不用再计算重叠率了 for i in range(index + 1,", "倒数第二颗牙前面只有一颗牙 end = index + 2 else: end = index + 1 #", "return refine else: return refine # 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def judge_overlap(id, output_all): ids = [11,", "temp_list[max_index] second_max_area = max(temp_list) second_max_index = area_list.index(second_max_area) if (max_area / second_max_area) < 1.2:", "label import numpy as np import copy # 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def refine_output(output): refine =", ":, i].reshape(1, -1) 
output_other_area = output_other.sum(1) + 0.001 inter = (output_id * output_other).sum(1)", "else: return refine else: return refine # 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def judge_overlap(id, output_all): ids =", "21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 34, 35,", "2 else: end = index + 1 # 最后一颗牙不用再计算重叠率了 for i in range(index", "in net.parameters()) print(f'{total_params:,} total parameters.') total_trainable_params = sum(p.numel() for p in net.parameters() if", "area_list.index(max_area) if max_area < 2000: return refine else: refine[output == max_index + 1]", "+ 1 # 最后一颗牙不用再计算重叠率了 for i in range(index + 1, end): # 每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗", "import label import numpy as np import copy # 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def refine_output(output): refine", "45, 46, 47, 48] index = ids.index(id) output_id = output_all[:, :, index].reshape(1, -1)", "-1) # 每一通道保存着一颗牙的分割结果 output_id_area = output_id.sum(1) + 0.001 refine = output_all if index", "= (output_id * output_other).sum(1) + 0.001 if (inter / output_id_area) >= 0.4: refine[:,", "output.max() area_list = [] for i in range(1, top + 1): area =", "refine[:, :, index] = 0 if (inter / output_other_area) >= 0.4: refine[:, :,", "= area_list.index(max_area) if max_area < 2000: return refine else: refine[output == max_index +", "13, 14, 15, 16, 17, 18, 21, 22, 23, 24, 25, 26, 27,", "end = index + 3 elif index == 30: # 倒数第二颗牙前面只有一颗牙 end =", ":, i] = 0 return refine # 输入一个模型,获得其参数量 def get_model_params(net): total_params = sum(p.numel()", "temp_list = copy.deepcopy(area_list) del temp_list[max_index] second_max_area = max(temp_list) second_max_index = area_list.index(second_max_area) if (max_area", "+ 2 else: end = index + 1 # 最后一颗牙不用再计算重叠率了 for i in", "12, 13, 14, 15, 16, 17, 18, 21, 22, 23, 24, 25, 26,", "total parameters.') total_trainable_params = sum(p.numel() for p in net.parameters() if p.requires_grad) print(f'{total_trainable_params:,} training", "max(area_list) max_index = area_list.index(max_area) if max_area < 2000: return refine else: refine[output ==", "> 0: output = label(output) top = output.max() area_list = [] for i", "range(index + 1, end): # 每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other = output_all[:, :, i].reshape(1, -1) output_other_area", "for i in range(1, top + 1): area = len(np.where(output == i)[0]) area_list.append(area)", "33, 34, 35, 36, 37, 38, 41, 42, 43, 44, 45, 46, 47,", "return refine else: return refine else: return refine # 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def judge_overlap(id, output_all):", "second_max_index + 1] = 1 return refine else: return refine else: return refine", "+ 0.001 inter = (output_id * output_other).sum(1) + 0.001 if (inter / output_id_area)", "area_list.append(area) max_area = max(area_list) max_index = area_list.index(max_area) if max_area < 2000: return refine", "print(f'{total_params:,} total parameters.') total_trainable_params = sum(p.numel() for p in net.parameters() if p.requires_grad) print(f'{total_trainable_params:,}", "# 输入一个模型,获得其参数量 def get_model_params(net): total_params = sum(p.numel() for p in net.parameters()) print(f'{total_params:,} total", "/ output_id_area) >= 0.4: refine[:, :, index] = 0 if (inter / output_other_area)", "refine = output_all if index <= 29: end = index + 3 elif", "output_other).sum(1) + 0.001 if (inter / output_id_area) >= 0.4: refine[:, :, index] =", "= len(np.where(output == i)[0]) area_list.append(area) max_area = max(area_list) max_index = 
area_list.index(max_area) if max_area", "+ 1, end): # 每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other = output_all[:, :, i].reshape(1, -1) output_other_area =", "[] for i in range(1, top + 1): area = len(np.where(output == i)[0])", "# 倒数第二颗牙前面只有一颗牙 end = index + 2 else: end = index + 1", "如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def judge_overlap(id, output_all): ids = [11, 12, 13, 14, 15, 16, 17,", "output_id = output_all[:, :, index].reshape(1, -1) # 每一通道保存着一颗牙的分割结果 output_id_area = output_id.sum(1) + 0.001", "37, 38, 41, 42, 43, 44, 45, 46, 47, 48] index = ids.index(id)", "[11, 12, 13, 14, 15, 16, 17, 18, 21, 22, 23, 24, 25,", "38, 41, 42, 43, 44, 45, 46, 47, 48] index = ids.index(id) output_id", "每一通道保存着一颗牙的分割结果 output_id_area = output_id.sum(1) + 0.001 refine = output_all if index <= 29:", "refine else: return refine else: return refine # 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def judge_overlap(id, output_all): ids", "copy # 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def refine_output(output): refine = np.zeros((1280, 2440), dtype=np.uint8) if len(np.where(output >", "refine # 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def judge_overlap(id, output_all): ids = [11, 12, 13, 14, 15,", "max(temp_list) second_max_index = area_list.index(second_max_area) if (max_area / second_max_area) < 1.2: refine[output == second_max_index", "else: return refine # 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def judge_overlap(id, output_all): ids = [11, 12, 13,", "# 最后一颗牙不用再计算重叠率了 for i in range(index + 1, end): # 每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other =", "output = label(output) top = output.max() area_list = [] for i in range(1,", "= output.max() area_list = [] for i in range(1, top + 1): area", "<= 29: end = index + 3 elif index == 30: # 倒数第二颗牙前面只有一颗牙", "second_max_area = max(temp_list) second_max_index = area_list.index(second_max_area) if (max_area / second_max_area) < 1.2: refine[output", "= ids.index(id) output_id = output_all[:, :, index].reshape(1, -1) # 每一通道保存着一颗牙的分割结果 output_id_area = output_id.sum(1)", "get_model_params(net): total_params = sum(p.numel() for p in net.parameters()) print(f'{total_params:,} total parameters.') total_trainable_params =", "48] index = ids.index(id) output_id = output_all[:, :, index].reshape(1, -1) # 每一通道保存着一颗牙的分割结果 output_id_area", "top > 1: temp_list = copy.deepcopy(area_list) del temp_list[max_index] second_max_area = max(temp_list) second_max_index =", "2440), dtype=np.uint8) if len(np.where(output > 0)[0]) > 0: output = label(output) top =", "if (inter / output_other_area) >= 0.4: refine[:, :, i] = 0 return refine", "range(1, top + 1): area = len(np.where(output == i)[0]) area_list.append(area) max_area = max(area_list)", "41, 42, 43, 44, 45, 46, 47, 48] index = ids.index(id) output_id =", "area_list.index(second_max_area) if (max_area / second_max_area) < 1.2: refine[output == second_max_index + 1] =", "index + 1 # 最后一颗牙不用再计算重叠率了 for i in range(index + 1, end): #", "最后一颗牙不用再计算重叠率了 for i in range(index + 1, end): # 每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other = output_all[:,", "1 if top > 1: temp_list = copy.deepcopy(area_list) del temp_list[max_index] second_max_area = max(temp_list)", "= max(temp_list) second_max_index = area_list.index(second_max_area) if (max_area / second_max_area) < 1.2: refine[output ==", "28, 31, 32, 33, 34, 35, 36, 37, 38, 41, 42, 43, 44,", "+ 0.001 refine = output_all if index <= 29: end = index +", "0.001 inter = (output_id * 
output_other).sum(1) + 0.001 if (inter / output_id_area) >=", "1.2: refine[output == second_max_index + 1] = 1 return refine else: return refine", "+ 1] = 1 return refine else: return refine else: return refine else:", "每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other = output_all[:, :, i].reshape(1, -1) output_other_area = output_other.sum(1) + 0.001 inter", "output_other_area) >= 0.4: refine[:, :, i] = 0 return refine # 输入一个模型,获得其参数量 def", "i)[0]) area_list.append(area) max_area = max(area_list) max_index = area_list.index(max_area) if max_area < 2000: return", "if top > 1: temp_list = copy.deepcopy(area_list) del temp_list[max_index] second_max_area = max(temp_list) second_max_index", "= copy.deepcopy(area_list) del temp_list[max_index] second_max_area = max(temp_list) second_max_index = area_list.index(second_max_area) if (max_area /", "skimage.measure import label import numpy as np import copy # 如果最大连通域面积小于2000,直接认为分割错误,返回无分割结果,反之保留面积最大连通域,如果面积第二大连通域和最大差不多,则两个都保留 def refine_output(output):", "index + 2 else: end = index + 1 # 最后一颗牙不用再计算重叠率了 for i", "1] = 1 if top > 1: temp_list = copy.deepcopy(area_list) del temp_list[max_index] second_max_area", "46, 47, 48] index = ids.index(id) output_id = output_all[:, :, index].reshape(1, -1) #", "29: end = index + 3 elif index == 30: # 倒数第二颗牙前面只有一颗牙 end", "3 elif index == 30: # 倒数第二颗牙前面只有一颗牙 end = index + 2 else:", "18, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 34,", "= label(output) top = output.max() area_list = [] for i in range(1, top", "0 return refine # 输入一个模型,获得其参数量 def get_model_params(net): total_params = sum(p.numel() for p in", "sum(p.numel() for p in net.parameters()) print(f'{total_params:,} total parameters.') total_trainable_params = sum(p.numel() for p", "if max_area < 2000: return refine else: refine[output == max_index + 1] =", "0 if (inter / output_other_area) >= 0.4: refine[:, :, i] = 0 return", "= 0 return refine # 输入一个模型,获得其参数量 def get_model_params(net): total_params = sum(p.numel() for p", "0.4: refine[:, :, index] = 0 if (inter / output_other_area) >= 0.4: refine[:,", "np.zeros((1280, 2440), dtype=np.uint8) if len(np.where(output > 0)[0]) > 0: output = label(output) top", "0.4: refine[:, :, i] = 0 return refine # 输入一个模型,获得其参数量 def get_model_params(net): total_params", "end): # 每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other = output_all[:, :, i].reshape(1, -1) output_other_area = output_other.sum(1) +", "= index + 3 elif index == 30: # 倒数第二颗牙前面只有一颗牙 end = index", "= index + 2 else: end = index + 1 # 最后一颗牙不用再计算重叠率了 for", "len(np.where(output > 0)[0]) > 0: output = label(output) top = output.max() area_list =", "0.001 refine = output_all if index <= 29: end = index + 3", "refine else: return refine else: return refine else: return refine # 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def", "== max_index + 1] = 1 if top > 1: temp_list = copy.deepcopy(area_list)", "= 1 if top > 1: temp_list = copy.deepcopy(area_list) del temp_list[max_index] second_max_area =", "(max_area / second_max_area) < 1.2: refine[output == second_max_index + 1] = 1 return", "= [11, 12, 13, 14, 15, 16, 17, 18, 21, 22, 23, 24,", "17, 18, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33,", "refine else: refine[output == max_index + 1] = 1 if top > 1:", "0)[0]) > 0: output = label(output) top = output.max() area_list = [] for", "area_list = [] for i in range(1, top + 1): area = len(np.where(output", "else: return refine else: return refine else: return refine # 如果两颗牙的分割结果重合面积大于其中一颗牙的40%,则认为这颗牙分割错误在了其它牙齿上,去掉这颗牙的分割结果 def judge_overlap(id,", "# 
每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other = output_all[:, :, i].reshape(1, -1) output_other_area = output_other.sum(1) + 0.001", "1 # 最后一颗牙不用再计算重叠率了 for i in range(index + 1, end): # 每颗牙和前面两颗牙算重叠区域,因为有可能前面少了一颗牙齿,所以选两颗 output_other", ":, index] = 0 if (inter / output_other_area) >= 0.4: refine[:, :, i]", "31, 32, 33, 34, 35, 36, 37, 38, 41, 42, 43, 44, 45,", "+ 0.001 if (inter / output_id_area) >= 0.4: refine[:, :, index] = 0", "35, 36, 37, 38, 41, 42, 43, 44, 45, 46, 47, 48] index", "index + 3 elif index == 30: # 倒数第二颗牙前面只有一颗牙 end = index +" ]
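
# Hedged usage sketch (added, not in the original file): a smoke test of
# refine_output against the 2000-pixel threshold described above. Assumes
# numpy and the functions defined in this module are in scope; the masks
# below are made up.
def _smoke_test_refine_output():
    big = np.zeros((1280, 2440), dtype=np.uint8)
    big[100:200, 100:200] = 1  # single 10,000-px component -> kept
    assert refine_output(big).sum() == 10000

    small = np.zeros((1280, 2440), dtype=np.uint8)
    small[:10, :10] = 1  # single 100-px component -> below threshold, discarded
    assert refine_output(small).sum() == 0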
[ "class Spam(Structure): pass if __name__ == \"__main__\": spam_0 = Spam(price=0.618, name=\"wexort\") print(spam_0.name, spam_0.price)", "Spam(Structure): pass if __name__ == \"__main__\": spam_0 = Spam(price=0.618, name=\"wexort\") print(spam_0.name, spam_0.price) spam_1", "signature import Structure, add_signature class SpamTheOldWay: def __init__(self, name, price): self.name = name", "import Structure, add_signature class SpamTheOldWay: def __init__(self, name, price): self.name = name self.price", "__name__ == \"__main__\": spam_0 = Spam(price=0.618, name=\"wexort\") print(spam_0.name, spam_0.price) spam_1 = Spam(\"hughluo\", 42)", "def __init__(self, name, price): self.name = name self.price = price @add_signature(\"name\", \"price\") class", "pass if __name__ == \"__main__\": spam_0 = Spam(price=0.618, name=\"wexort\") print(spam_0.name, spam_0.price) spam_1 =", "SpamTheOldWay: def __init__(self, name, price): self.name = name self.price = price @add_signature(\"name\", \"price\")", "\"__main__\": spam_0 = Spam(price=0.618, name=\"wexort\") print(spam_0.name, spam_0.price) spam_1 = Spam(\"hughluo\", 42) print(spam_1.name, spam_1.price)", "name, price): self.name = name self.price = price @add_signature(\"name\", \"price\") class Spam(Structure): pass", "name self.price = price @add_signature(\"name\", \"price\") class Spam(Structure): pass if __name__ == \"__main__\":", "== \"__main__\": spam_0 = Spam(price=0.618, name=\"wexort\") print(spam_0.name, spam_0.price) spam_1 = Spam(\"hughluo\", 42) print(spam_1.name,", "Structure, add_signature class SpamTheOldWay: def __init__(self, name, price): self.name = name self.price =", "\"price\") class Spam(Structure): pass if __name__ == \"__main__\": spam_0 = Spam(price=0.618, name=\"wexort\") print(spam_0.name,", "from signature import Structure, add_signature class SpamTheOldWay: def __init__(self, name, price): self.name =", "if __name__ == \"__main__\": spam_0 = Spam(price=0.618, name=\"wexort\") print(spam_0.name, spam_0.price) spam_1 = Spam(\"hughluo\",", "@add_signature(\"name\", \"price\") class Spam(Structure): pass if __name__ == \"__main__\": spam_0 = Spam(price=0.618, name=\"wexort\")", "self.price = price @add_signature(\"name\", \"price\") class Spam(Structure): pass if __name__ == \"__main__\": spam_0", "self.name = name self.price = price @add_signature(\"name\", \"price\") class Spam(Structure): pass if __name__", "= price @add_signature(\"name\", \"price\") class Spam(Structure): pass if __name__ == \"__main__\": spam_0 =", "add_signature class SpamTheOldWay: def __init__(self, name, price): self.name = name self.price = price", "price @add_signature(\"name\", \"price\") class Spam(Structure): pass if __name__ == \"__main__\": spam_0 = Spam(price=0.618,", "= name self.price = price @add_signature(\"name\", \"price\") class Spam(Structure): pass if __name__ ==", "class SpamTheOldWay: def __init__(self, name, price): self.name = name self.price = price @add_signature(\"name\",", "__init__(self, name, price): self.name = name self.price = price @add_signature(\"name\", \"price\") class Spam(Structure):", "price): self.name = name self.price = price @add_signature(\"name\", \"price\") class Spam(Structure): pass if" ]
[ "hash_filters import * from list_filters import * from version_filters import * from string_filters", "import * from dict_filters import * from hash_filters import * from list_filters import", "from list_filters import * from version_filters import * from string_filters import * from", "bool_filters import * from dict_filters import * from hash_filters import * from list_filters", "dict_filters import * from hash_filters import * from list_filters import * from version_filters", "import * from list_filters import * from version_filters import * from string_filters import", "import * from version_filters import * from string_filters import * from datetime_filters import", "* from dict_filters import * from hash_filters import * from list_filters import *", "* from list_filters import * from version_filters import * from string_filters import *", "list_filters import * from version_filters import * from string_filters import * from datetime_filters", "from hash_filters import * from list_filters import * from version_filters import * from", "* from version_filters import * from string_filters import * from datetime_filters import *", "from bool_filters import * from dict_filters import * from hash_filters import * from", "from dict_filters import * from hash_filters import * from list_filters import * from", "* from hash_filters import * from list_filters import * from version_filters import *", "import * from hash_filters import * from list_filters import * from version_filters import" ]
[ "+ date[4:] first = arrow.get(date0) quan = quan_name.split(\"m_\")[0] m = -1 * int(quan)", "range of the used months\"\"\" start = start[:4] + '-' + start[4:] startdate", "os.makedirs(trainDataDir) trainFile = quantile + '/' + get_train(Date, quantileName) shutil.copy(trainFile, trainDataDir) print(quantile, 'DONE')", "= 'experiment_data_2' periodInfo = 'monthly' usedQuantile = [] usedQuantile.extend(['6m_1_16', '6m_3_18']) usedQuantile.extend(['12m_1_16', '12m_3_18']) usedQuantile.extend(['3m_1_31',", "dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate) if not os.path.exists(dir1st): os.mkdir(dir1st) closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo) shutil.copy(closePriceFile,", "'experiment_data_2' periodInfo = 'monthly' usedQuantile = [] usedQuantile.extend(['6m_1_16', '6m_3_18']) usedQuantile.extend(['12m_1_16', '12m_3_18']) usedQuantile.extend(['3m_1_31', '3m_3_33'])", "= os.path.basename(feature) for Date in get_date_range(startDate, endDate): Date = Date.format('YYYYMM') testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st,", "'{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName) if not os.path.exists(testDataDir): os.makedirs(testDataDir) testFile = feature + '/testing/' +", "= '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName) if not os.path.exists(trainDataDir): os.makedirs(trainDataDir) trainFile = quantile +", "featureName, quantileName) if not os.path.exists(trainDataDir): os.makedirs(trainDataDir) trainFile = quantile + '/' + get_train(Date,", "featureName = os.path.basename(feature) for Date in get_date_range(startDate, endDate): Date = Date.format('YYYYMM') testDataDir =", "quan = quan_name.split(\"m_\")[0] m = -1 * int(quan) second = first.shift(months=-1) second =", "start[4:] startdate = arrow.get(start) end = end[:4] + '-' + end[4:] enddate =", "= date[:4] + '-' + date[4:] first = arrow.get(date0) quan = quan_name.split(\"m_\")[0] m", "= 'D:/copy{0}_{1}'.format(startDate, endDate) if not os.path.exists(dir1st): os.mkdir(dir1st) closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo) shutil.copy(closePriceFile, dir1st)", "testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName) if not os.path.exists(testDataDir): os.makedirs(testDataDir) testFile = feature +", "+ '/testing/' + get_test(Date) shutil.copy(testFile, testDataDir) trainDataList = glob.glob(feature + '/training/*m_*_*') for quantile", "= first.shift(months=m) first = first.format('YYYYMM') ret = first + '-' + second +", "glob def get_date_range(start, end): \"\"\"get the date range of the used months\"\"\" start", "= '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName) if not os.path.exists(testDataDir): os.makedirs(testDataDir) testFile = feature + '/testing/'", "rootDir = 'D:/rongshidata' # dataInfo = 'experiment_data_1' dataInfo = 'experiment_data_2' periodInfo = 'monthly'", "trainDataList = glob.glob(feature + '/training/*m_*_*') for quantile in trainDataList: quantileName = os.path.basename(quantile) if", "endDate): Date = Date.format('YYYYMM') testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName) if not os.path.exists(testDataDir): os.makedirs(testDataDir)", "= end[:4] + '-' + end[4:] enddate = arrow.get(end) return arrow.Arrow.range('month', startdate, enddate)", "'{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo) featureList = glob.glob(featureDir) for feature in featureList: featureName = os.path.basename(feature)", "+ second + '_train.csv' return ret def 
get_test(date): \"\"\"get the file name of", "= first.shift(months=-1) second = second.format(\"YYYYMM\") first = first.shift(months=m) first = first.format('YYYYMM') ret =", "if quantileName not in usedQuantile: continue trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName) if", "'24m_3_15']) usedQuantile.extend(['36m_1_11', '36m_3_13']) dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate) if not os.path.exists(dir1st): os.mkdir(dir1st) closePriceFile =", "= '{0}/{1}/close.txt'.format(rootDir, dataInfo) shutil.copy(closePriceFile, dir1st) featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo) featureList = glob.glob(featureDir)", "= glob.glob(featureDir) for feature in featureList: featureName = os.path.basename(feature) for Date in get_date_range(startDate,", "= 'monthly' usedQuantile = [] usedQuantile.extend(['6m_1_16', '6m_3_18']) usedQuantile.extend(['12m_1_16', '12m_3_18']) usedQuantile.extend(['3m_1_31', '3m_3_33']) usedQuantile.extend(['24m_1_13', '24m_3_15'])", "get_train(date, quan_name): \"\"\"get the file name of the training data\"\"\" date0 = date[:4]", "= start[:4] + '-' + start[4:] startdate = arrow.get(start) end = end[:4] +", "+ '-' + start[4:] startdate = arrow.get(start) end = end[:4] + '-' +", "data\"\"\" ret = date + 'pred.csv' return ret startDate = '201805' endDate =", "enddate) def get_train(date, quan_name): \"\"\"get the file name of the training data\"\"\" date0", "the date range of the used months\"\"\" start = start[:4] + '-' +", "trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName) if not os.path.exists(trainDataDir): os.makedirs(trainDataDir) trainFile = quantile", "'-' + end[4:] enddate = arrow.get(end) return arrow.Arrow.range('month', startdate, enddate) def get_train(date, quan_name):", "= Date.format('YYYYMM') testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName) if not os.path.exists(testDataDir): os.makedirs(testDataDir) testFile =", "= second.format(\"YYYYMM\") first = first.shift(months=m) first = first.format('YYYYMM') ret = first + '-'", "'6m_3_18']) usedQuantile.extend(['12m_1_16', '12m_3_18']) usedQuantile.extend(['3m_1_31', '3m_3_33']) usedQuantile.extend(['24m_1_13', '24m_3_15']) usedQuantile.extend(['36m_1_11', '36m_3_13']) dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate)", "= '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo) featureList = glob.glob(featureDir) for feature in featureList: featureName =", "'201805' endDate = '201805' rootDir = 'D:/rongshidata' # dataInfo = 'experiment_data_1' dataInfo =", "get_test(Date) shutil.copy(testFile, testDataDir) trainDataList = glob.glob(feature + '/training/*m_*_*') for quantile in trainDataList: quantileName", "shutil.copy(closePriceFile, dir1st) featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo) featureList = glob.glob(featureDir) for feature in", "testDataDir) trainDataList = glob.glob(feature + '/training/*m_*_*') for quantile in trainDataList: quantileName = os.path.basename(quantile)", "start = start[:4] + '-' + start[4:] startdate = arrow.get(start) end = end[:4]", "usedQuantile: continue trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName) if not os.path.exists(trainDataDir): os.makedirs(trainDataDir) trainFile", "* int(quan) second = first.shift(months=-1) second = second.format(\"YYYYMM\") first = first.shift(months=m) first =", "return ret def 
get_test(date): \"\"\"get the file name of the test data\"\"\" ret", "file name of the test data\"\"\" ret = date + 'pred.csv' return ret", "dataInfo = 'experiment_data_1' dataInfo = 'experiment_data_2' periodInfo = 'monthly' usedQuantile = [] usedQuantile.extend(['6m_1_16',", "+ get_test(Date) shutil.copy(testFile, testDataDir) trainDataList = glob.glob(feature + '/training/*m_*_*') for quantile in trainDataList:", "os.path.basename(feature) for Date in get_date_range(startDate, endDate): Date = Date.format('YYYYMM') testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo,", "os.path.exists(testDataDir): os.makedirs(testDataDir) testFile = feature + '/testing/' + get_test(Date) shutil.copy(testFile, testDataDir) trainDataList =", "'{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName) if not os.path.exists(trainDataDir): os.makedirs(trainDataDir) trainFile = quantile + '/'", "'D:/copy{0}_{1}'.format(startDate, endDate) if not os.path.exists(dir1st): os.mkdir(dir1st) closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo) shutil.copy(closePriceFile, dir1st) featureDir", "end[4:] enddate = arrow.get(end) return arrow.Arrow.range('month', startdate, enddate) def get_train(date, quan_name): \"\"\"get the", "os.makedirs(testDataDir) testFile = feature + '/testing/' + get_test(Date) shutil.copy(testFile, testDataDir) trainDataList = glob.glob(feature", "used months\"\"\" start = start[:4] + '-' + start[4:] startdate = arrow.get(start) end", "for feature in featureList: featureName = os.path.basename(feature) for Date in get_date_range(startDate, endDate): Date", "if not os.path.exists(dir1st): os.mkdir(dir1st) closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo) shutil.copy(closePriceFile, dir1st) featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir,", "testFile = feature + '/testing/' + get_test(Date) shutil.copy(testFile, testDataDir) trainDataList = glob.glob(feature +", "feature + '/testing/' + get_test(Date) shutil.copy(testFile, testDataDir) trainDataList = glob.glob(feature + '/training/*m_*_*') for", "usedQuantile = [] usedQuantile.extend(['6m_1_16', '6m_3_18']) usedQuantile.extend(['12m_1_16', '12m_3_18']) usedQuantile.extend(['3m_1_31', '3m_3_33']) usedQuantile.extend(['24m_1_13', '24m_3_15']) usedQuantile.extend(['36m_1_11', '36m_3_13'])", "arrow.Arrow.range('month', startdate, enddate) def get_train(date, quan_name): \"\"\"get the file name of the training", "featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo) featureList = glob.glob(featureDir) for feature in featureList: featureName", "os.path.basename(quantile) if quantileName not in usedQuantile: continue trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName)", "usedQuantile.extend(['36m_1_11', '36m_3_13']) dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate) if not os.path.exists(dir1st): os.mkdir(dir1st) closePriceFile = '{0}/{1}/close.txt'.format(rootDir,", "the training data\"\"\" date0 = date[:4] + '-' + date[4:] first = arrow.get(date0)", "+ '-' + end[4:] enddate = arrow.get(end) return arrow.Arrow.range('month', startdate, enddate) def get_train(date,", "'_train.csv' return ret def get_test(date): \"\"\"get the file name of the test data\"\"\"", "dir1st) featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo) featureList = glob.glob(featureDir) for feature in featureList:", "first = first.format('YYYYMM') ret = first + '-' + second + '_train.csv' return", "return 
arrow.Arrow.range('month', startdate, enddate) def get_train(date, quan_name): \"\"\"get the file name of the", "quan_name): \"\"\"get the file name of the training data\"\"\" date0 = date[:4] +", "int(quan) second = first.shift(months=-1) second = second.format(\"YYYYMM\") first = first.shift(months=m) first = first.format('YYYYMM')", "= 'D:/rongshidata' # dataInfo = 'experiment_data_1' dataInfo = 'experiment_data_2' periodInfo = 'monthly' usedQuantile", "= feature + '/testing/' + get_test(Date) shutil.copy(testFile, testDataDir) trainDataList = glob.glob(feature + '/training/*m_*_*')", "os.path.exists(trainDataDir): os.makedirs(trainDataDir) trainFile = quantile + '/' + get_train(Date, quantileName) shutil.copy(trainFile, trainDataDir) print(quantile,", "get_date_range(startDate, endDate): Date = Date.format('YYYYMM') testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName) if not os.path.exists(testDataDir):", "periodInfo = 'monthly' usedQuantile = [] usedQuantile.extend(['6m_1_16', '6m_3_18']) usedQuantile.extend(['12m_1_16', '12m_3_18']) usedQuantile.extend(['3m_1_31', '3m_3_33']) usedQuantile.extend(['24m_1_13',", "usedQuantile.extend(['6m_1_16', '6m_3_18']) usedQuantile.extend(['12m_1_16', '12m_3_18']) usedQuantile.extend(['3m_1_31', '3m_3_33']) usedQuantile.extend(['24m_1_13', '24m_3_15']) usedQuantile.extend(['36m_1_11', '36m_3_13']) dir1st = 'D:/copy{0}_{1}'.format(startDate,", "'/training/*m_*_*') for quantile in trainDataList: quantileName = os.path.basename(quantile) if quantileName not in usedQuantile:", "first.format('YYYYMM') ret = first + '-' + second + '_train.csv' return ret def", "+ end[4:] enddate = arrow.get(end) return arrow.Arrow.range('month', startdate, enddate) def get_train(date, quan_name): \"\"\"get", "+ '-' + second + '_train.csv' return ret def get_test(date): \"\"\"get the file", "second = second.format(\"YYYYMM\") first = first.shift(months=m) first = first.format('YYYYMM') ret = first +", "+ '/training/*m_*_*') for quantile in trainDataList: quantileName = os.path.basename(quantile) if quantileName not in", "ret = first + '-' + second + '_train.csv' return ret def get_test(date):", "of the training data\"\"\" date0 = date[:4] + '-' + date[4:] first =", "periodInfo, featureName) if not os.path.exists(testDataDir): os.makedirs(testDataDir) testFile = feature + '/testing/' + get_test(Date)", "= '201805' endDate = '201805' rootDir = 'D:/rongshidata' # dataInfo = 'experiment_data_1' dataInfo", "\"\"\"get the file name of the training data\"\"\" date0 = date[:4] + '-'", "date[4:] first = arrow.get(date0) quan = quan_name.split(\"m_\")[0] m = -1 * int(quan) second", "ret def get_test(date): \"\"\"get the file name of the test data\"\"\" ret =", "import arrow import glob def get_date_range(start, end): \"\"\"get the date range of the", "first.shift(months=m) first = first.format('YYYYMM') ret = first + '-' + second + '_train.csv'", "# dataInfo = 'experiment_data_1' dataInfo = 'experiment_data_2' periodInfo = 'monthly' usedQuantile = []", "dataInfo = 'experiment_data_2' periodInfo = 'monthly' usedQuantile = [] usedQuantile.extend(['6m_1_16', '6m_3_18']) usedQuantile.extend(['12m_1_16', '12m_3_18'])", "the test data\"\"\" ret = date + 'pred.csv' return ret startDate = '201805'", "glob.glob(feature + '/training/*m_*_*') for quantile in trainDataList: quantileName = os.path.basename(quantile) if quantileName not", "periodInfo, featureName, quantileName) if not os.path.exists(trainDataDir): os.makedirs(trainDataDir) trainFile = quantile + '/' 
+", "= 'experiment_data_1' dataInfo = 'experiment_data_2' periodInfo = 'monthly' usedQuantile = [] usedQuantile.extend(['6m_1_16', '6m_3_18'])", "'monthly' usedQuantile = [] usedQuantile.extend(['6m_1_16', '6m_3_18']) usedQuantile.extend(['12m_1_16', '12m_3_18']) usedQuantile.extend(['3m_1_31', '3m_3_33']) usedQuantile.extend(['24m_1_13', '24m_3_15']) usedQuantile.extend(['36m_1_11',", "import glob def get_date_range(start, end): \"\"\"get the date range of the used months\"\"\"", "of the used months\"\"\" start = start[:4] + '-' + start[4:] startdate =", "in featureList: featureName = os.path.basename(feature) for Date in get_date_range(startDate, endDate): Date = Date.format('YYYYMM')", "'pred.csv' return ret startDate = '201805' endDate = '201805' rootDir = 'D:/rongshidata' #", "os.mkdir(dir1st) closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo) shutil.copy(closePriceFile, dir1st) featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo) featureList", "the used months\"\"\" start = start[:4] + '-' + start[4:] startdate = arrow.get(start)", "quantileName) if not os.path.exists(trainDataDir): os.makedirs(trainDataDir) trainFile = quantile + '/' + get_train(Date, quantileName)", "def get_date_range(start, end): \"\"\"get the date range of the used months\"\"\" start =", "in usedQuantile: continue trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName) if not os.path.exists(trainDataDir): os.makedirs(trainDataDir)", "training data\"\"\" date0 = date[:4] + '-' + date[4:] first = arrow.get(date0) quan", "file name of the training data\"\"\" date0 = date[:4] + '-' + date[4:]", "= first.format('YYYYMM') ret = first + '-' + second + '_train.csv' return ret", "quan_name.split(\"m_\")[0] m = -1 * int(quan) second = first.shift(months=-1) second = second.format(\"YYYYMM\") first", "Date.format('YYYYMM') testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName) if not os.path.exists(testDataDir): os.makedirs(testDataDir) testFile = feature", "featureName) if not os.path.exists(testDataDir): os.makedirs(testDataDir) testFile = feature + '/testing/' + get_test(Date) shutil.copy(testFile,", "'3m_3_33']) usedQuantile.extend(['24m_1_13', '24m_3_15']) usedQuantile.extend(['36m_1_11', '36m_3_13']) dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate) if not os.path.exists(dir1st): os.mkdir(dir1st)", "+ '_train.csv' return ret def get_test(date): \"\"\"get the file name of the test", "'/testing/' + get_test(Date) shutil.copy(testFile, testDataDir) trainDataList = glob.glob(feature + '/training/*m_*_*') for quantile in", "not in usedQuantile: continue trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName) if not os.path.exists(trainDataDir):", "= arrow.get(date0) quan = quan_name.split(\"m_\")[0] m = -1 * int(quan) second = first.shift(months=-1)", "usedQuantile.extend(['3m_1_31', '3m_3_33']) usedQuantile.extend(['24m_1_13', '24m_3_15']) usedQuantile.extend(['36m_1_11', '36m_3_13']) dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate) if not os.path.exists(dir1st):", "= first + '-' + second + '_train.csv' return ret def get_test(date): \"\"\"get", "glob.glob(featureDir) for feature in featureList: featureName = os.path.basename(feature) for Date in get_date_range(startDate, endDate):", "[] usedQuantile.extend(['6m_1_16', '6m_3_18']) usedQuantile.extend(['12m_1_16', '12m_3_18']) usedQuantile.extend(['3m_1_31', '3m_3_33']) usedQuantile.extend(['24m_1_13', '24m_3_15']) 
usedQuantile.extend(['36m_1_11', '36m_3_13']) dir1st =", "'36m_3_13']) dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate) if not os.path.exists(dir1st): os.mkdir(dir1st) closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo)", "def get_test(date): \"\"\"get the file name of the test data\"\"\" ret = date", "= arrow.get(end) return arrow.Arrow.range('month', startdate, enddate) def get_train(date, quan_name): \"\"\"get the file name", "not os.path.exists(testDataDir): os.makedirs(testDataDir) testFile = feature + '/testing/' + get_test(Date) shutil.copy(testFile, testDataDir) trainDataList", "if not os.path.exists(testDataDir): os.makedirs(testDataDir) testFile = feature + '/testing/' + get_test(Date) shutil.copy(testFile, testDataDir)", "featureList: featureName = os.path.basename(feature) for Date in get_date_range(startDate, endDate): Date = Date.format('YYYYMM') testDataDir", "dataInfo) shutil.copy(closePriceFile, dir1st) featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo) featureList = glob.glob(featureDir) for feature", "= '201805' rootDir = 'D:/rongshidata' # dataInfo = 'experiment_data_1' dataInfo = 'experiment_data_2' periodInfo", "get_test(date): \"\"\"get the file name of the test data\"\"\" ret = date +", "'201805' rootDir = 'D:/rongshidata' # dataInfo = 'experiment_data_1' dataInfo = 'experiment_data_2' periodInfo =", "featureList = glob.glob(featureDir) for feature in featureList: featureName = os.path.basename(feature) for Date in", "endDate = '201805' rootDir = 'D:/rongshidata' # dataInfo = 'experiment_data_1' dataInfo = 'experiment_data_2'", "not os.path.exists(trainDataDir): os.makedirs(trainDataDir) trainFile = quantile + '/' + get_train(Date, quantileName) shutil.copy(trainFile, trainDataDir)", "quantileName = os.path.basename(quantile) if quantileName not in usedQuantile: continue trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo,", "= -1 * int(quan) second = first.shift(months=-1) second = second.format(\"YYYYMM\") first = first.shift(months=m)", "of the test data\"\"\" ret = date + 'pred.csv' return ret startDate =", "months\"\"\" start = start[:4] + '-' + start[4:] startdate = arrow.get(start) end =", "arrow.get(start) end = end[:4] + '-' + end[4:] enddate = arrow.get(end) return arrow.Arrow.range('month',", "arrow.get(end) return arrow.Arrow.range('month', startdate, enddate) def get_train(date, quan_name): \"\"\"get the file name of", "+ 'pred.csv' return ret startDate = '201805' endDate = '201805' rootDir = 'D:/rongshidata'", "import shutil import arrow import glob def get_date_range(start, end): \"\"\"get the date range", "the file name of the test data\"\"\" ret = date + 'pred.csv' return", "'experiment_data_1' dataInfo = 'experiment_data_2' periodInfo = 'monthly' usedQuantile = [] usedQuantile.extend(['6m_1_16', '6m_3_18']) usedQuantile.extend(['12m_1_16',", "quantileName not in usedQuantile: continue trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName) if not", "not os.path.exists(dir1st): os.mkdir(dir1st) closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo) shutil.copy(closePriceFile, dir1st) featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo,", "data\"\"\" date0 = date[:4] + '-' + date[4:] first = arrow.get(date0) quan =", "os import shutil import arrow import glob def get_date_range(start, end): \"\"\"get the date", "date + 'pred.csv' return ret startDate = '201805' endDate = '201805' rootDir =", "start[:4] + '-' + start[4:] 
startdate = arrow.get(start) end = end[:4] + '-'", "get_date_range(start, end): \"\"\"get the date range of the used months\"\"\" start = start[:4]", "usedQuantile.extend(['24m_1_13', '24m_3_15']) usedQuantile.extend(['36m_1_11', '36m_3_13']) dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate) if not os.path.exists(dir1st): os.mkdir(dir1st) closePriceFile", "end[:4] + '-' + end[4:] enddate = arrow.get(end) return arrow.Arrow.range('month', startdate, enddate) def", "= arrow.get(start) end = end[:4] + '-' + end[4:] enddate = arrow.get(end) return", "second + '_train.csv' return ret def get_test(date): \"\"\"get the file name of the", "= quan_name.split(\"m_\")[0] m = -1 * int(quan) second = first.shift(months=-1) second = second.format(\"YYYYMM\")", "date range of the used months\"\"\" start = start[:4] + '-' + start[4:]", "\"\"\"get the file name of the test data\"\"\" ret = date + 'pred.csv'", "test data\"\"\" ret = date + 'pred.csv' return ret startDate = '201805' endDate", "-1 * int(quan) second = first.shift(months=-1) second = second.format(\"YYYYMM\") first = first.shift(months=m) first", "= os.path.basename(quantile) if quantileName not in usedQuantile: continue trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName,", "the file name of the training data\"\"\" date0 = date[:4] + '-' +", "first.shift(months=-1) second = second.format(\"YYYYMM\") first = first.shift(months=m) first = first.format('YYYYMM') ret = first", "end = end[:4] + '-' + end[4:] enddate = arrow.get(end) return arrow.Arrow.range('month', startdate,", "ret startDate = '201805' endDate = '201805' rootDir = 'D:/rongshidata' # dataInfo =", "+ start[4:] startdate = arrow.get(start) end = end[:4] + '-' + end[4:] enddate", "shutil import arrow import glob def get_date_range(start, end): \"\"\"get the date range of", "end): \"\"\"get the date range of the used months\"\"\" start = start[:4] +", "first = first.shift(months=m) first = first.format('YYYYMM') ret = first + '-' + second", "closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo) shutil.copy(closePriceFile, dir1st) featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo) featureList =", "feature in featureList: featureName = os.path.basename(feature) for Date in get_date_range(startDate, endDate): Date =", "name of the training data\"\"\" date0 = date[:4] + '-' + date[4:] first", "'D:/rongshidata' # dataInfo = 'experiment_data_1' dataInfo = 'experiment_data_2' periodInfo = 'monthly' usedQuantile =", "second = first.shift(months=-1) second = second.format(\"YYYYMM\") first = first.shift(months=m) first = first.format('YYYYMM') ret", "'{0}/{1}/close.txt'.format(rootDir, dataInfo) shutil.copy(closePriceFile, dir1st) featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo) featureList = glob.glob(featureDir) for", "in get_date_range(startDate, endDate): Date = Date.format('YYYYMM') testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName) if not", "if not os.path.exists(trainDataDir): os.makedirs(trainDataDir) trainFile = quantile + '/' + get_train(Date, quantileName) shutil.copy(trainFile,", "\"\"\"get the date range of the used months\"\"\" start = start[:4] + '-'", "def get_train(date, quan_name): \"\"\"get the file name of the training data\"\"\" date0 =", "m = -1 * int(quan) second = first.shift(months=-1) second = second.format(\"YYYYMM\") first =", "return ret startDate = '201805' endDate = '201805' rootDir = 'D:/rongshidata' # dataInfo", "for Date in 
get_date_range(startDate, endDate): Date = Date.format('YYYYMM') testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName)", "'-' + second + '_train.csv' return ret def get_test(date): \"\"\"get the file name", "for quantile in trainDataList: quantileName = os.path.basename(quantile) if quantileName not in usedQuantile: continue", "quantile in trainDataList: quantileName = os.path.basename(quantile) if quantileName not in usedQuantile: continue trainDataDir", "in trainDataList: quantileName = os.path.basename(quantile) if quantileName not in usedQuantile: continue trainDataDir =", "date[:4] + '-' + date[4:] first = arrow.get(date0) quan = quan_name.split(\"m_\")[0] m =", "enddate = arrow.get(end) return arrow.Arrow.range('month', startdate, enddate) def get_train(date, quan_name): \"\"\"get the file", "= [] usedQuantile.extend(['6m_1_16', '6m_3_18']) usedQuantile.extend(['12m_1_16', '12m_3_18']) usedQuantile.extend(['3m_1_31', '3m_3_33']) usedQuantile.extend(['24m_1_13', '24m_3_15']) usedQuantile.extend(['36m_1_11', '36m_3_13']) dir1st", "periodInfo) featureList = glob.glob(featureDir) for feature in featureList: featureName = os.path.basename(feature) for Date", "Date in get_date_range(startDate, endDate): Date = Date.format('YYYYMM') testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName) if", "import os import shutil import arrow import glob def get_date_range(start, end): \"\"\"get the", "dataInfo, periodInfo) featureList = glob.glob(featureDir) for feature in featureList: featureName = os.path.basename(feature) for", "continue trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName) if not os.path.exists(trainDataDir): os.makedirs(trainDataDir) trainFile =", "startdate = arrow.get(start) end = end[:4] + '-' + end[4:] enddate = arrow.get(end)", "first + '-' + second + '_train.csv' return ret def get_test(date): \"\"\"get the", "usedQuantile.extend(['12m_1_16', '12m_3_18']) usedQuantile.extend(['3m_1_31', '3m_3_33']) usedQuantile.extend(['24m_1_13', '24m_3_15']) usedQuantile.extend(['36m_1_11', '36m_3_13']) dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate) if", "'12m_3_18']) usedQuantile.extend(['3m_1_31', '3m_3_33']) usedQuantile.extend(['24m_1_13', '24m_3_15']) usedQuantile.extend(['36m_1_11', '36m_3_13']) dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate) if not", "endDate) if not os.path.exists(dir1st): os.mkdir(dir1st) closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo) shutil.copy(closePriceFile, dir1st) featureDir =", "startDate = '201805' endDate = '201805' rootDir = 'D:/rongshidata' # dataInfo = 'experiment_data_1'", "date0 = date[:4] + '-' + date[4:] first = arrow.get(date0) quan = quan_name.split(\"m_\")[0]", "'-' + date[4:] first = arrow.get(date0) quan = quan_name.split(\"m_\")[0] m = -1 *", "arrow.get(date0) quan = quan_name.split(\"m_\")[0] m = -1 * int(quan) second = first.shift(months=-1) second", "name of the test data\"\"\" ret = date + 'pred.csv' return ret startDate", "'-' + start[4:] startdate = arrow.get(start) end = end[:4] + '-' + end[4:]", "ret = date + 'pred.csv' return ret startDate = '201805' endDate = '201805'", "startdate, enddate) def get_train(date, quan_name): \"\"\"get the file name of the training data\"\"\"", "os.path.exists(dir1st): os.mkdir(dir1st) closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo) shutil.copy(closePriceFile, dir1st) featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo)", "Date = 
Date.format('YYYYMM') testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName) if not os.path.exists(testDataDir): os.makedirs(testDataDir) testFile", "= glob.glob(feature + '/training/*m_*_*') for quantile in trainDataList: quantileName = os.path.basename(quantile) if quantileName", "shutil.copy(testFile, testDataDir) trainDataList = glob.glob(feature + '/training/*m_*_*') for quantile in trainDataList: quantileName =", "first = arrow.get(date0) quan = quan_name.split(\"m_\")[0] m = -1 * int(quan) second =", "arrow import glob def get_date_range(start, end): \"\"\"get the date range of the used", "trainDataList: quantileName = os.path.basename(quantile) if quantileName not in usedQuantile: continue trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st,", "+ '-' + date[4:] first = arrow.get(date0) quan = quan_name.split(\"m_\")[0] m = -1", "= date + 'pred.csv' return ret startDate = '201805' endDate = '201805' rootDir", "second.format(\"YYYYMM\") first = first.shift(months=m) first = first.format('YYYYMM') ret = first + '-' +" ]
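As a quick sanity check of the two filename helpers, the values below follow directly from the definitions above; the assertions are a worked example, not part of the original script.

# Worked example (illustrative; values derived from get_train/get_test above):
assert get_test('201805') == '201805pred.csv'
# '6m_1_16' encodes a 6-month window: the start month 201805 is shifted back
# 6 months to 201711 and paired with the month before the target, 201804.
assert get_train('201805', '6m_1_16') == '201711-201804_train.csv'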
[ "bool value: new value of site configuration \"\"\" # ConfigParser stores bool in", "configuration :returns: value of site configuration :rtype: bool \"\"\" return _get_parser().getboolean('siteconfig', name) def", "\"\"\" return _get_parser().getboolean('siteconfig', name) def set_config(name, value): \"\"\" Set a site configuration boolean.", "return g.siteconfigParser g.siteconfigParser = ConfigParser() g.siteconfigParser.read(FILENAME) return g.siteconfigParser def get_config(name): \"\"\" Get a", "g.get('siteconfigParser', None): if commit: if g.get('siteconfigHasWrites', False): with open(FILENAME, 'w') as configfile: g.siteconfigParser.write(configfile)", "Set a site configuration boolean. :param basestring name: name of site configuration :param", "site configuration :returns: value of site configuration :rtype: bool \"\"\" return _get_parser().getboolean('siteconfig', name)", "g.siteconfigParser g.siteconfigHasWrites = None del g.siteconfigHasWrites def _get_parser(): if g.get('siteconfigParser', None): return g.siteconfigParser", "a site configuration boolean. :param basestring name: name of site configuration :returns: value", "bool in memory, and getboolean expects string _get_parser().set('siteconfig', name, str(int(value))) g.siteconfigHasWrites = True", "configfile: g.siteconfigParser.write(configfile) g.siteconfigParser = None del g.siteconfigParser g.siteconfigHasWrites = None del g.siteconfigHasWrites def", "-*- coding: UTF-8 -*- # \"\"\"Module to access site configuration in siteconfig.ini.\"\"\" from", "g.get('siteconfigHasWrites', False): with open(FILENAME, 'w') as configfile: g.siteconfigParser.write(configfile) g.siteconfigParser = None del g.siteconfigParser", "return g.siteconfigParser def get_config(name): \"\"\" Get a site configuration boolean. :param basestring name:", "\"\"\" Get a site configuration boolean. :param basestring name: name of site configuration", "a site configuration boolean. 
:param basestring name: name of site configuration :param bool", "flask import g FILENAME = '/srv/oclubs/siteconfig.ini' def _done(commit=True): if g.get('siteconfigParser', None): if commit:", "def _get_parser(): if g.get('siteconfigParser', None): return g.siteconfigParser g.siteconfigParser = ConfigParser() g.siteconfigParser.read(FILENAME) return g.siteconfigParser", ":rtype: bool \"\"\" return _get_parser().getboolean('siteconfig', name) def set_config(name, value): \"\"\" Set a site", "if g.get('siteconfigParser', None): if commit: if g.get('siteconfigHasWrites', False): with open(FILENAME, 'w') as configfile:", "with open(FILENAME, 'w') as configfile: g.siteconfigParser.write(configfile) g.siteconfigParser = None del g.siteconfigParser g.siteconfigHasWrites =", "g.get('siteconfigParser', None): return g.siteconfigParser g.siteconfigParser = ConfigParser() g.siteconfigParser.read(FILENAME) return g.siteconfigParser def get_config(name): \"\"\"", "None del g.siteconfigParser g.siteconfigHasWrites = None del g.siteconfigHasWrites def _get_parser(): if g.get('siteconfigParser', None):", "ConfigParser import ConfigParser from flask import g FILENAME = '/srv/oclubs/siteconfig.ini' def _done(commit=True): if", "of site configuration :rtype: bool \"\"\" return _get_parser().getboolean('siteconfig', name) def set_config(name, value): \"\"\"", "configuration :rtype: bool \"\"\" return _get_parser().getboolean('siteconfig', name) def set_config(name, value): \"\"\" Set a", "in siteconfig.ini.\"\"\" from ConfigParser import ConfigParser from flask import g FILENAME = '/srv/oclubs/siteconfig.ini'", "g.siteconfigParser g.siteconfigParser = ConfigParser() g.siteconfigParser.read(FILENAME) return g.siteconfigParser def get_config(name): \"\"\" Get a site", "python # -*- coding: UTF-8 -*- # \"\"\"Module to access site configuration in", "site configuration \"\"\" # ConfigParser stores bool in memory, and getboolean expects string", "False): with open(FILENAME, 'w') as configfile: g.siteconfigParser.write(configfile) g.siteconfigParser = None del g.siteconfigParser g.siteconfigHasWrites", "None): if commit: if g.get('siteconfigHasWrites', False): with open(FILENAME, 'w') as configfile: g.siteconfigParser.write(configfile) g.siteconfigParser", "open(FILENAME, 'w') as configfile: g.siteconfigParser.write(configfile) g.siteconfigParser = None del g.siteconfigParser g.siteconfigHasWrites = None", "g.siteconfigHasWrites def _get_parser(): if g.get('siteconfigParser', None): return g.siteconfigParser g.siteconfigParser = ConfigParser() g.siteconfigParser.read(FILENAME) return", "g.siteconfigParser = ConfigParser() g.siteconfigParser.read(FILENAME) return g.siteconfigParser def get_config(name): \"\"\" Get a site configuration", "boolean. :param basestring name: name of site configuration :param bool value: new value", ":param basestring name: name of site configuration :param bool value: new value of", "of site configuration :param bool value: new value of site configuration \"\"\" #", "g.siteconfigParser.write(configfile) g.siteconfigParser = None del g.siteconfigParser g.siteconfigHasWrites = None del g.siteconfigHasWrites def _get_parser():", "\"\"\" # ConfigParser stores bool in memory, and getboolean expects string _get_parser().set('siteconfig', name,", "site configuration boolean. 
:param basestring name: name of site configuration :returns: value of", "None del g.siteconfigHasWrites def _get_parser(): if g.get('siteconfigParser', None): return g.siteconfigParser g.siteconfigParser = ConfigParser()", "'w') as configfile: g.siteconfigParser.write(configfile) g.siteconfigParser = None del g.siteconfigParser g.siteconfigHasWrites = None del", "site configuration boolean. :param basestring name: name of site configuration :param bool value:", "def set_config(name, value): \"\"\" Set a site configuration boolean. :param basestring name: name", "get_config(name): \"\"\" Get a site configuration boolean. :param basestring name: name of site", "None): return g.siteconfigParser g.siteconfigParser = ConfigParser() g.siteconfigParser.read(FILENAME) return g.siteconfigParser def get_config(name): \"\"\" Get", "name: name of site configuration :returns: value of site configuration :rtype: bool \"\"\"", ":returns: value of site configuration :rtype: bool \"\"\" return _get_parser().getboolean('siteconfig', name) def set_config(name,", "from ConfigParser import ConfigParser from flask import g FILENAME = '/srv/oclubs/siteconfig.ini' def _done(commit=True):", "= None del g.siteconfigHasWrites def _get_parser(): if g.get('siteconfigParser', None): return g.siteconfigParser g.siteconfigParser =", "configuration boolean. :param basestring name: name of site configuration :returns: value of site", ":param bool value: new value of site configuration \"\"\" # ConfigParser stores bool", "stores bool in memory, and getboolean expects string _get_parser().set('siteconfig', name, str(int(value))) g.siteconfigHasWrites =", "name) def set_config(name, value): \"\"\" Set a site configuration boolean. :param basestring name:", "name of site configuration :param bool value: new value of site configuration \"\"\"", "value of site configuration \"\"\" # ConfigParser stores bool in memory, and getboolean", "name of site configuration :returns: value of site configuration :rtype: bool \"\"\" return", "site configuration in siteconfig.ini.\"\"\" from ConfigParser import ConfigParser from flask import g FILENAME", "ConfigParser stores bool in memory, and getboolean expects string _get_parser().set('siteconfig', name, str(int(value))) g.siteconfigHasWrites", "configuration in siteconfig.ini.\"\"\" from ConfigParser import ConfigParser from flask import g FILENAME =", "FILENAME = '/srv/oclubs/siteconfig.ini' def _done(commit=True): if g.get('siteconfigParser', None): if commit: if g.get('siteconfigHasWrites', False):", "# ConfigParser stores bool in memory, and getboolean expects string _get_parser().set('siteconfig', name, str(int(value)))", "new value of site configuration \"\"\" # ConfigParser stores bool in memory, and", "_get_parser().getboolean('siteconfig', name) def set_config(name, value): \"\"\" Set a site configuration boolean. :param basestring", "#!/usr/bin/env python # -*- coding: UTF-8 -*- # \"\"\"Module to access site configuration", "\"\"\" Set a site configuration boolean. 
:param basestring name: name of site configuration", "ConfigParser from flask import g FILENAME = '/srv/oclubs/siteconfig.ini' def _done(commit=True): if g.get('siteconfigParser', None):", "coding: UTF-8 -*- # \"\"\"Module to access site configuration in siteconfig.ini.\"\"\" from ConfigParser", "# -*- coding: UTF-8 -*- # \"\"\"Module to access site configuration in siteconfig.ini.\"\"\"", "import ConfigParser from flask import g FILENAME = '/srv/oclubs/siteconfig.ini' def _done(commit=True): if g.get('siteconfigParser',", "as configfile: g.siteconfigParser.write(configfile) g.siteconfigParser = None del g.siteconfigParser g.siteconfigHasWrites = None del g.siteconfigHasWrites", "del g.siteconfigHasWrites def _get_parser(): if g.get('siteconfigParser', None): return g.siteconfigParser g.siteconfigParser = ConfigParser() g.siteconfigParser.read(FILENAME)", "# \"\"\"Module to access site configuration in siteconfig.ini.\"\"\" from ConfigParser import ConfigParser from", "if commit: if g.get('siteconfigHasWrites', False): with open(FILENAME, 'w') as configfile: g.siteconfigParser.write(configfile) g.siteconfigParser =", "siteconfig.ini.\"\"\" from ConfigParser import ConfigParser from flask import g FILENAME = '/srv/oclubs/siteconfig.ini' def", "from flask import g FILENAME = '/srv/oclubs/siteconfig.ini' def _done(commit=True): if g.get('siteconfigParser', None): if", "boolean. :param basestring name: name of site configuration :returns: value of site configuration", "basestring name: name of site configuration :param bool value: new value of site", "'/srv/oclubs/siteconfig.ini' def _done(commit=True): if g.get('siteconfigParser', None): if commit: if g.get('siteconfigHasWrites', False): with open(FILENAME,", "import g FILENAME = '/srv/oclubs/siteconfig.ini' def _done(commit=True): if g.get('siteconfigParser', None): if commit: if", "of site configuration :returns: value of site configuration :rtype: bool \"\"\" return _get_parser().getboolean('siteconfig',", "_done(commit=True): if g.get('siteconfigParser', None): if commit: if g.get('siteconfigHasWrites', False): with open(FILENAME, 'w') as", "set_config(name, value): \"\"\" Set a site configuration boolean. :param basestring name: name of", "if g.get('siteconfigParser', None): return g.siteconfigParser g.siteconfigParser = ConfigParser() g.siteconfigParser.read(FILENAME) return g.siteconfigParser def get_config(name):", ":param basestring name: name of site configuration :returns: value of site configuration :rtype:", "ConfigParser() g.siteconfigParser.read(FILENAME) return g.siteconfigParser def get_config(name): \"\"\" Get a site configuration boolean. :param", "-*- # \"\"\"Module to access site configuration in siteconfig.ini.\"\"\" from ConfigParser import ConfigParser", "def _done(commit=True): if g.get('siteconfigParser', None): if commit: if g.get('siteconfigHasWrites', False): with open(FILENAME, 'w')", "Get a site configuration boolean. 
:param basestring name: name of site configuration :returns:", "g FILENAME = '/srv/oclubs/siteconfig.ini' def _done(commit=True): if g.get('siteconfigParser', None): if commit: if g.get('siteconfigHasWrites',", "= None del g.siteconfigParser g.siteconfigHasWrites = None del g.siteconfigHasWrites def _get_parser(): if g.get('siteconfigParser',", "if g.get('siteconfigHasWrites', False): with open(FILENAME, 'w') as configfile: g.siteconfigParser.write(configfile) g.siteconfigParser = None del", "value of site configuration :rtype: bool \"\"\" return _get_parser().getboolean('siteconfig', name) def set_config(name, value):", "configuration :param bool value: new value of site configuration \"\"\" # ConfigParser stores", "g.siteconfigParser def get_config(name): \"\"\" Get a site configuration boolean. :param basestring name: name", "g.siteconfigHasWrites = None del g.siteconfigHasWrites def _get_parser(): if g.get('siteconfigParser', None): return g.siteconfigParser g.siteconfigParser", "UTF-8 -*- # \"\"\"Module to access site configuration in siteconfig.ini.\"\"\" from ConfigParser import", "site configuration :rtype: bool \"\"\" return _get_parser().getboolean('siteconfig', name) def set_config(name, value): \"\"\" Set", "value): \"\"\" Set a site configuration boolean. :param basestring name: name of site", "commit: if g.get('siteconfigHasWrites', False): with open(FILENAME, 'w') as configfile: g.siteconfigParser.write(configfile) g.siteconfigParser = None", "_get_parser(): if g.get('siteconfigParser', None): return g.siteconfigParser g.siteconfigParser = ConfigParser() g.siteconfigParser.read(FILENAME) return g.siteconfigParser def", "of site configuration \"\"\" # ConfigParser stores bool in memory, and getboolean expects", "name: name of site configuration :param bool value: new value of site configuration", "del g.siteconfigParser g.siteconfigHasWrites = None del g.siteconfigHasWrites def _get_parser(): if g.get('siteconfigParser', None): return", "basestring name: name of site configuration :returns: value of site configuration :rtype: bool", "= '/srv/oclubs/siteconfig.ini' def _done(commit=True): if g.get('siteconfigParser', None): if commit: if g.get('siteconfigHasWrites', False): with", "= ConfigParser() g.siteconfigParser.read(FILENAME) return g.siteconfigParser def get_config(name): \"\"\" Get a site configuration boolean.", "\"\"\"Module to access site configuration in siteconfig.ini.\"\"\" from ConfigParser import ConfigParser from flask", "configuration \"\"\" # ConfigParser stores bool in memory, and getboolean expects string _get_parser().set('siteconfig',", "bool \"\"\" return _get_parser().getboolean('siteconfig', name) def set_config(name, value): \"\"\" Set a site configuration", "value: new value of site configuration \"\"\" # ConfigParser stores bool in memory,", "g.siteconfigParser.read(FILENAME) return g.siteconfigParser def get_config(name): \"\"\" Get a site configuration boolean. :param basestring", "def get_config(name): \"\"\" Get a site configuration boolean. :param basestring name: name of", "site configuration :param bool value: new value of site configuration \"\"\" # ConfigParser", "to access site configuration in siteconfig.ini.\"\"\" from ConfigParser import ConfigParser from flask import", "return _get_parser().getboolean('siteconfig', name) def set_config(name, value): \"\"\" Set a site configuration boolean. 
:param", "g.siteconfigParser = None del g.siteconfigParser g.siteconfigHasWrites = None del g.siteconfigHasWrites def _get_parser(): if", "access site configuration in siteconfig.ini.\"\"\" from ConfigParser import ConfigParser from flask import g", "configuration boolean. :param basestring name: name of site configuration :param bool value: new" ]
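This module is Python 2 (on Python 3 the import would be from configparser import ConfigParser). The parser is cached on Flask's application-context global g, so everything here must run inside an app or request context. A minimal round-trip sketch follows, assuming the code above lives in a module named siteconfig (a hypothetical name) and that siteconfig.ini already contains a [siteconfig] section:

from flask import Flask

import siteconfig  # hypothetical module name for the code above

app = Flask(__name__)

with app.app_context():
    # 'registration_open' is a made-up key; the value is stored in memory
    # as the string '1', which getboolean converts back to True.
    siteconfig.set_config('registration_open', True)
    assert siteconfig.get_config('registration_open') is True
    siteconfig._done()  # flush pending writes back to siteconfig.ini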
[ "popular in the Western Hemisphere. (The Fourth of July didn't see regular celebration", "hours and min def test_dates_3(self): self.assert_extract(' 2018-06-21 15:54', library.dates_newiso8601, '2018-06-21 15:54') # Checks", "find four of them. def test_integers(self): self.assert_extract(NUM_CORPUS, library.integers, '1845', '15', '20', '80') #", "grouping def test_numbers(self): self.assert_extract(' 123,456,789 ', library.comma_seperator, '123,456,789') if __name__ == '__main__': unittest.main()", "15:54:00.123-0800') # Checks with hours and min with seconds with milliseconds and timezone", "', library.dates_newiso8601, '2018-06-21') # Checks with hours and min def test_dates_3(self): self.assert_extract(' 2018-06-21", "'2018-06-21 15:54:00.123-0800') # Checks with hours and min with seconds with milliseconds and", "holiday becoming popular in the Western Hemisphere. (The Fourth of July didn't see", "\"1st\". def test_mixed_ordinals(self): self.assert_extract(NUM_CORPUS, library.mixed_ordinals, '5th', '1st') # Second unit test; prove that", "for integers, we find four of them. def test_integers(self): self.assert_extract(NUM_CORPUS, library.integers, '1845', '15',", "expected])) # First unit test; prove that if we scan NUM_CORPUS looking for", "test_dates(self): self.assert_extract('I was born on 2015-12-31.', library.dates_iso8601, '2015-12-31') def test_dates_no_integers(self): self.assert_extract(\"I was born", "of the Mexican Revolution), and is the 1st example of a national independence", "15:54:00.123Z') # Checks with hours and min with seconds with milliseconds and timezone", "and , after the month def test_dates_fmt3(self): self.assert_extract(' 21 Jun, 2018 ', library.dates_fmt3,", "was born on 2015-12-31.', library.dates_iso8601, '2015-12-31') def test_dates_no_integers(self): self.assert_extract(\"I was born on 2015-12-31\",", "self.assert_extract(\"no integers\", library.integers) def test_dates(self): self.assert_extract('I was born on 2015-12-31.', library.dates_iso8601, '2015-12-31') def", "', library.dates_newiso8601, '2018-06-21 15:54:00') # Checks with hours and min with seconds with", "after the month def test_dates_fmt3(self): self.assert_extract(' 21 Jun, 2018 ', library.dates_fmt3, '21 Jun,", "celebrate Cinco de Mayo. This tradition began in 1845 (the twenty-second anniversary of", "Fourth of July didn't see regular celebration in the US until 15-20 years", "library.dates_fmt3, '21 Jun, 2018') # Checks for date format - regular def test_dates_fmt31(self):", "with milliseconds and timezone offset -0800 def test_dates_7(self): self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601,", "of the population-- trending toward 80. 
''' class TestCase(unittest.TestCase): # Helper function def", "was born on 2015-12-31\", library.dates_iso8601) def test_dates_fmt2(self): self.assert_extract('I was born on 25 Jan", "1845 (the twenty-second anniversary of the Mexican Revolution), and is the 1st example", "with seconds with milliseconds def test_dates_5(self): self.assert_extract(' 2018-06-21 15:54:00.123 ', library.dates_newiso8601, '2018-06-21 15:54:00.123')", "date format - regular def test_dates_fmt31(self): self.assert_extract(' 21 Jun 2018 ', library.dates_fmt3, '21", "and timezone(Z) def test_dates_6(self): self.assert_extract(' 2018-06-21 15:54:00.123Z ', library.dates_newiso8601, '2018-06-21 15:54:00.123Z') # Checks", "(the twenty-second anniversary of the Mexican Revolution), and is the 1st example of", "of May every year, Mexicans celebrate Cinco de Mayo. This tradition began in", "is celebrated by 77.9% of the population-- trending toward 80. ''' class TestCase(unittest.TestCase):", "Checks with hours and min with seconds with milliseconds def test_dates_5(self): self.assert_extract(' 2018-06-21", "Mexican Revolution), and is the 1st example of a national independence holiday becoming", "month def test_dates_fmt3(self): self.assert_extract(' 21 Jun, 2018 ', library.dates_fmt3, '21 Jun, 2018') #", "Checks only for the date def test_dates_2(self): self.assert_extract(' 2018-06-21 ', library.dates_newiso8601, '2018-06-21') #", "results. def test_no_integers(self): self.assert_extract(\"no integers\", library.integers) def test_dates(self): self.assert_extract('I was born on 2015-12-31.',", "unit test; prove that if we look for integers, we find four of", "national independence holiday becoming popular in the Western Hemisphere. (The Fourth of July", "and min with seconds with milliseconds and timezone offset -0800 def test_dates_7(self): self.assert_extract('", "test_dates_7(self): self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21 15:54:00.123-0800') # Checks with hours and", "library.scan(text, extractors)] self.assertEquals(str(actual), str([x for x in expected])) # First unit test; prove", "unit test; prove that if we look for integers where there are none,", "# Third unit test; prove that if we look for integers where there", "that if we look for integers where there are none, we get no", "def test_dates_1(self): self.assert_extract(' 2018-06-21 15:54:14.876 ', library.dates_newiso8601, '2018-06-21 15:54:14.876') # Checks only for", "self.assert_extract(' 2018-06-21 15:54:14.876 ', library.dates_newiso8601, '2018-06-21 15:54:14.876') # Checks only for the date", "look for integers, we find four of them. def test_integers(self): self.assert_extract(NUM_CORPUS, library.integers, '1845',", "2017') # Checks for the iso date format with full Date 2018-06-21 15:54:14.87Z", "', library.dates_newiso8601, '2018-06-21 15:54:00.123Z') # Checks with hours and min with seconds with", "library.dates_newiso8601, '2018-06-21 15:54') # Checks with hours and min with seconds def test_dates_4(self):", "regular celebration in the US until 15-20 years later.) It is celebrated by", "Support comma seperated grouping def test_numbers(self): self.assert_extract(' 123,456,789 ', library.comma_seperator, '123,456,789') if __name__", "there are none, we get no results. 
def test_no_integers(self): self.assert_extract(\"no integers\", library.integers) def", "test_dates_2(self): self.assert_extract(' 2018-06-21 ', library.dates_newiso8601, '2018-06-21') # Checks with hours and min def", "seconds with milliseconds and timezone offset -0800 def test_dates_7(self): self.assert_extract(' 2018-06-21 15:54:00.123-0800 ',", "library.dates_fmt3, '21 Jun 2018') # Support comma seperated grouping def test_numbers(self): self.assert_extract(' 123,456,789", "2017.', library.dates_fmt2, '25 Jan 2017') # Checks for the iso date format with", "in 1845 (the twenty-second anniversary of the Mexican Revolution), and is the 1st", "min with seconds def test_dates_4(self): self.assert_extract(' 2018-06-21 15:54:00 ', library.dates_newiso8601, '2018-06-21 15:54:00') #", "min with seconds with milliseconds and timezone(Z) def test_dates_6(self): self.assert_extract(' 2018-06-21 15:54:00.123Z ',", "milliseconds def test_dates_5(self): self.assert_extract(' 2018-06-21 15:54:00.123 ', library.dates_newiso8601, '2018-06-21 15:54:00.123') # Checks with", "hours and min with seconds with milliseconds def test_dates_5(self): self.assert_extract(' 2018-06-21 15:54:00.123 ',", "min with seconds with milliseconds and timezone offset -0800 def test_dates_8(self): self.assert_extract(' 2018-06-21", "'21 Jun 2018') # Support comma seperated grouping def test_numbers(self): self.assert_extract(' 123,456,789 ',", "def test_dates_7(self): self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21 15:54:00.123-0800') # Checks with hours", "'2018-06-21 15:54:00') # Checks with hours and min with seconds with milliseconds def", "hours and min with seconds with milliseconds and timezone offset -0800 def test_dates_7(self):", "a national independence holiday becoming popular in the Western Hemisphere. (The Fourth of", "'1st') # Second unit test; prove that if we look for integers, we", "Third unit test; prove that if we look for integers where there are", "none, we get no results. def test_no_integers(self): self.assert_extract(\"no integers\", library.integers) def test_dates(self): self.assert_extract('I", "them. def test_integers(self): self.assert_extract(NUM_CORPUS, library.integers, '1845', '15', '20', '80') # Third unit test;", "NUM_CORPUS looking for mixed_ordinals, # we find \"5th\" and \"1st\". def test_mixed_ordinals(self): self.assert_extract(NUM_CORPUS,", "four of them. def test_integers(self): self.assert_extract(NUM_CORPUS, library.integers, '1845', '15', '20', '80') # Third", "we look for integers, we find four of them. def test_integers(self): self.assert_extract(NUM_CORPUS, library.integers,", "', library.dates_fmt3, '21 Jun, 2018') # Checks for date format - regular def", "def assert_extract(self, text, extractors, *expected): actual = [x[1].group(0) for x in library.scan(text, extractors)]", "self.assert_extract(NUM_CORPUS, library.integers, '1845', '15', '20', '80') # Third unit test; prove that if", "test; prove that if we scan NUM_CORPUS looking for mixed_ordinals, # we find", "def test_no_integers(self): self.assert_extract(\"no integers\", library.integers) def test_dates(self): self.assert_extract('I was born on 2015-12-31.', library.dates_iso8601,", "15:54', library.dates_newiso8601, '2018-06-21 15:54') # Checks with hours and min with seconds def", "and \"1st\". 
def test_mixed_ordinals(self): self.assert_extract(NUM_CORPUS, library.mixed_ordinals, '5th', '1st') # Second unit test; prove", "library.dates_newiso8601, '2018-06-21') # Checks with hours and min def test_dates_3(self): self.assert_extract(' 2018-06-21 15:54',", "test; prove that if we look for integers where there are none, we", "US until 15-20 years later.) It is celebrated by 77.9% of the population--", "by 77.9% of the population-- trending toward 80. ''' class TestCase(unittest.TestCase): # Helper", "scan NUM_CORPUS looking for mixed_ordinals, # we find \"5th\" and \"1st\". def test_mixed_ordinals(self):", "integers\", library.integers) def test_dates(self): self.assert_extract('I was born on 2015-12-31.', library.dates_iso8601, '2015-12-31') def test_dates_no_integers(self):", "- regular def test_dates_fmt31(self): self.assert_extract(' 21 Jun 2018 ', library.dates_fmt3, '21 Jun 2018')", "library NUM_CORPUS = ''' On the 5th of May every year, Mexicans celebrate", "the date def test_dates_2(self): self.assert_extract(' 2018-06-21 ', library.dates_newiso8601, '2018-06-21') # Checks with hours", "import unittest import library NUM_CORPUS = ''' On the 5th of May every", "date def test_dates_2(self): self.assert_extract(' 2018-06-21 ', library.dates_newiso8601, '2018-06-21') # Checks with hours and", "= [x[1].group(0) for x in library.scan(text, extractors)] self.assertEquals(str(actual), str([x for x in expected]))", "-0800 def test_dates_8(self): self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21 15:54:00.123-0800') # Checks for", "Jun 2018 ', library.dates_fmt3, '21 Jun 2018') # Support comma seperated grouping def", "we get no results. def test_no_integers(self): self.assert_extract(\"no integers\", library.integers) def test_dates(self): self.assert_extract('I was", "for the date def test_dates_2(self): self.assert_extract(' 2018-06-21 ', library.dates_newiso8601, '2018-06-21') # Checks with", "# Checks with hours and min def test_dates_3(self): self.assert_extract(' 2018-06-21 15:54', library.dates_newiso8601, '2018-06-21", "Checks with hours and min with seconds with milliseconds and timezone offset -0800", "Cinco de Mayo. This tradition began in 1845 (the twenty-second anniversary of the", "self.assert_extract(' 2018-06-21 15:54:00 ', library.dates_newiso8601, '2018-06-21 15:54:00') # Checks with hours and min", "trending toward 80. ''' class TestCase(unittest.TestCase): # Helper function def assert_extract(self, text, extractors,", "2018-06-21 15:54', library.dates_newiso8601, '2018-06-21 15:54') # Checks with hours and min with seconds", "self.assertEquals(str(actual), str([x for x in expected])) # First unit test; prove that if", "hours and min with seconds def test_dates_4(self): self.assert_extract(' 2018-06-21 15:54:00 ', library.dates_newiso8601, '2018-06-21", "and min with seconds with milliseconds and timezone(Z) def test_dates_6(self): self.assert_extract(' 2018-06-21 15:54:00.123Z", "years later.) It is celebrated by 77.9% of the population-- trending toward 80.", "'20', '80') # Third unit test; prove that if we look for integers", "Date 2018-06-21 15:54:14.87Z def test_dates_1(self): self.assert_extract(' 2018-06-21 15:54:14.876 ', library.dates_newiso8601, '2018-06-21 15:54:14.876') #", "Jun, 2018') # Checks for date format - regular def test_dates_fmt31(self): self.assert_extract(' 21", "look for integers where there are none, we get no results. def test_no_integers(self):", "the US until 15-20 years later.) 
It is celebrated by 77.9% of the", "seperated grouping def test_numbers(self): self.assert_extract(' 123,456,789 ', library.comma_seperator, '123,456,789') if __name__ == '__main__':", "2015-12-31\", library.dates_iso8601) def test_dates_fmt2(self): self.assert_extract('I was born on 25 Jan 2017.', library.dates_fmt2, '25", "for date format and , after the month def test_dates_fmt3(self): self.assert_extract(' 21 Jun,", "library.dates_newiso8601, '2018-06-21 15:54:00') # Checks with hours and min with seconds with milliseconds", "15:54:00.123Z ', library.dates_newiso8601, '2018-06-21 15:54:00.123Z') # Checks with hours and min with seconds", "full Date 2018-06-21 15:54:14.87Z def test_dates_1(self): self.assert_extract(' 2018-06-21 15:54:14.876 ', library.dates_newiso8601, '2018-06-21 15:54:14.876')", "min with seconds with milliseconds def test_dates_5(self): self.assert_extract(' 2018-06-21 15:54:00.123 ', library.dates_newiso8601, '2018-06-21", "# Checks for date format and , after the month def test_dates_fmt3(self): self.assert_extract('", "'2018-06-21') # Checks with hours and min def test_dates_3(self): self.assert_extract(' 2018-06-21 15:54', library.dates_newiso8601,", "with seconds def test_dates_4(self): self.assert_extract(' 2018-06-21 15:54:00 ', library.dates_newiso8601, '2018-06-21 15:54:00') # Checks", "-0800 def test_dates_7(self): self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21 15:54:00.123-0800') # Checks with", "Helper function def assert_extract(self, text, extractors, *expected): actual = [x[1].group(0) for x in", "Jun, 2018 ', library.dates_fmt3, '21 Jun, 2018') # Checks for date format -", "'2018-06-21 15:54:00.123-0800') # Checks for date format and , after the month def", "anniversary of the Mexican Revolution), and is the 1st example of a national", "date format and , after the month def test_dates_fmt3(self): self.assert_extract(' 21 Jun, 2018", "2018-06-21 15:54:14.87Z def test_dates_1(self): self.assert_extract(' 2018-06-21 15:54:14.876 ', library.dates_newiso8601, '2018-06-21 15:54:14.876') # Checks", "15:54:14.876') # Checks only for the date def test_dates_2(self): self.assert_extract(' 2018-06-21 ', library.dates_newiso8601,", "if we scan NUM_CORPUS looking for mixed_ordinals, # we find \"5th\" and \"1st\".", "Checks for the iso date format with full Date 2018-06-21 15:54:14.87Z def test_dates_1(self):", "self.assert_extract(' 2018-06-21 15:54:00.123 ', library.dates_newiso8601, '2018-06-21 15:54:00.123') # Checks with hours and min", "Jan 2017.', library.dates_fmt2, '25 Jan 2017') # Checks for the iso date format", "# Support comma seperated grouping def test_numbers(self): self.assert_extract(' 123,456,789 ', library.comma_seperator, '123,456,789') if", "def test_dates_8(self): self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21 15:54:00.123-0800') # Checks for date", "2018-06-21 15:54:00.123Z ', library.dates_newiso8601, '2018-06-21 15:54:00.123Z') # Checks with hours and min with", "21 Jun 2018 ', library.dates_fmt3, '21 Jun 2018') # Support comma seperated grouping", "= ''' On the 5th of May every year, Mexicans celebrate Cinco de", "x in expected])) # First unit test; prove that if we scan NUM_CORPUS", "milliseconds and timezone offset -0800 def test_dates_7(self): self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21", "timezone offset -0800 def test_dates_7(self): self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', 
import unittest

import library

NUM_CORPUS = '''
On the 5th of May every year, Mexicans celebrate Cinco de Mayo.
This tradition began in 1845 (the twenty-second anniversary of the Mexican Revolution), and is
the 1st example of a national independence holiday becoming popular in the Western Hemisphere.
(The Fourth of July didn't see regular celebration in the US until 15-20 years later.)
It is celebrated by 77.9% of the population-- trending toward 80.
'''

class TestCase(unittest.TestCase):

    # Helper function
    def assert_extract(self, text, extractors, *expected):
        actual = [x[1].group(0) for x in library.scan(text, extractors)]
        self.assertEqual(str(actual), str(list(expected)))

    # First unit test; prove that if we scan NUM_CORPUS looking for mixed_ordinals,
    # we find "5th" and "1st".
    def test_mixed_ordinals(self):
        self.assert_extract(NUM_CORPUS, library.mixed_ordinals, '5th', '1st')

    # Second unit test; prove that if we look for integers, we find four of them.
    def test_integers(self):
        self.assert_extract(NUM_CORPUS, library.integers, '1845', '15', '20', '80')

    # Third unit test; prove that if we look for integers where there are none,
    # we get no results.
    def test_no_integers(self):
        self.assert_extract("no integers", library.integers)

    def test_dates(self):
        self.assert_extract('I was born on 2015-12-31.', library.dates_iso8601, '2015-12-31')

    def test_dates_no_integers(self):
        self.assert_extract("I was born on 2015-12-31", library.dates_iso8601)

    def test_dates_fmt2(self):
        self.assert_extract('I was born on 25 Jan 2017.', library.dates_fmt2, '25 Jan 2017')

    # Checks the iso date format with a full timestamp, e.g. 2018-06-21 15:54:14.876
    def test_dates_1(self):
        self.assert_extract(' 2018-06-21 15:54:14.876 ', library.dates_newiso8601, '2018-06-21 15:54:14.876')

    # Checks only for the date
    def test_dates_2(self):
        self.assert_extract(' 2018-06-21 ', library.dates_newiso8601, '2018-06-21')

    # Checks with hours and min
    def test_dates_3(self):
        self.assert_extract(' 2018-06-21 15:54', library.dates_newiso8601, '2018-06-21 15:54')

    # Checks with hours and min with seconds
    def test_dates_4(self):
        self.assert_extract(' 2018-06-21 15:54:00 ', library.dates_newiso8601, '2018-06-21 15:54:00')

    # Checks with hours and min with seconds with milliseconds
    def test_dates_5(self):
        self.assert_extract(' 2018-06-21 15:54:00.123 ', library.dates_newiso8601, '2018-06-21 15:54:00.123')

    # Checks with hours and min with seconds with milliseconds and timezone (Z)
    def test_dates_6(self):
        self.assert_extract(' 2018-06-21 15:54:00.123Z ', library.dates_newiso8601, '2018-06-21 15:54:00.123Z')

    # Checks with hours and min with seconds with milliseconds and timezone offset -0800
    def test_dates_7(self):
        self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21 15:54:00.123-0800')

    # Checks with hours and min with seconds with milliseconds and timezone offset -0800
    def test_dates_8(self):
        self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21 15:54:00.123-0800')

    # Checks for date format with a comma after the month
    def test_dates_fmt3(self):
        self.assert_extract(' 21 Jun, 2018 ', library.dates_fmt3, '21 Jun, 2018')

    # Checks for date format - regular
    def test_dates_fmt31(self):
        self.assert_extract(' 21 Jun 2018 ', library.dates_fmt3, '21 Jun 2018')

    # Supports comma-separated grouping
    def test_numbers(self):
        self.assert_extract(' 123,456,789 ', library.comma_seperator, '123,456,789')

if __name__ == '__main__':
    unittest.main()
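# ---------------------------------------------------------------------------
# The tests above assume a `library` module that exposes regex extractors
# (integers, mixed_ordinals, dates_iso8601, dates_newiso8601, ...) and a
# scan() helper whose items support x[1].group(0). That module is not shown
# here; the sketch below is only a minimal guess at its shape. The patterns
# are deliberately simplified and do not handle the corner cases the tests
# encode (ordinal suffixes, decimals, timezone offsets); they exist to
# illustrate the scan() contract, not to pass the suite.

import re

integers = re.compile(r'\b\d+\b')                       # bare digit runs
mixed_ordinals = re.compile(r'\b\d+(?:st|nd|rd|th)\b')  # 1st, 2nd, 5th, ...
dates_iso8601 = re.compile(r'\b\d{4}-\d{2}-\d{2}\b')    # YYYY-MM-DD


def scan(text, extractors):
    """Yield (extractor, match) pairs for every hit of every extractor."""
    if not isinstance(extractors, (list, tuple)):
        extractors = [extractors]  # accept a single extractor for convenience
    for extractor in extractors:
        for match in extractor.finditer(text):
            yield (extractor, match)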
[ "DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_group( self: Database, source_data_token:", "[str(col_id) for col_id in col_ids] def table_dtypes(self, data_token: DataToken) -> dict[str, DataType]: with", "target_data_token) def move_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with", "self._db_adapter.delete(data_token, criteria) def drop_table(self: Database, data_token: DataToken) -> None: with self._db_adapter: self._registrar.drop_table(data_token) def", "target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_group( self: Database,", "Optional[BaseException] = None, traceback: Optional[TracebackType] = None, ) -> None: self.stop() def stop(self)", "None: self._db_adapter = database_adapter self._registrar = DatabaseRegistrar(database_adapter) def table_columns(self, data_token: DataToken) -> list[str]:", "from tanuki.data_store.data_type import DataType from tanuki.data_store.metadata import Metadata from tanuki.data_store.query import Query from", "DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_group( self:", "list_groups(self) -> set[str]: with self._db_adapter: return self._registrar.list_groups() def list_group_tables(self, data_group: str) -> list[DataToken]:", "metadata=metadata) return cast(store_type, store) def create_table(self: Database, data_token: DataToken, store_type: Type[T]) -> None:", "DatabaseAdapter from .data_token import DataToken from .database_registrar import DatabaseRegistrar from .db_exceptions import MissingTableError", "self._registrar.drop_table(data_token) def drop_group(self: Database, data_group: str) -> None: with self._db_adapter: self._registrar.drop_group(data_group) def copy_table(", "raise MissingTableError(data_token) self._db_adapter.delete(data_token, criteria) def drop_table(self: Database, data_token: DataToken) -> None: with self._db_adapter:", "import Metadata from tanuki.data_store.query import Query from .adapter.database_adapter import DatabaseAdapter from .data_token import", "__init__(self, database_adapter: DatabaseAdapter) -> None: self._db_adapter = database_adapter self._registrar = DatabaseRegistrar(database_adapter) def table_columns(self,", "= self._registrar.store_type(data_token) return {column.name: column.dtype for column in store_class.columns} def has_table(self, data_token: DataToken)", "Optional[list[ColumnAlias]] = None, ) -> T: with self._db_adapter: if not self.has_table(data_token): raise MissingTableError(data_token)", "None table_data = self._db_adapter.query(data_token, query, columns) store_class: Type[T] = self._registrar.store_type(data_token) metadata_class: Type[M] =", "MissingTableError(data_token) columns = [str(col) for col in alignment_columns] self._db_adapter.upsert(data_token, data_store, columns) def delete(self:", "T, alignment_columns: list[ColumnAlias], ) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token)", "column.dtype for column in store_class.columns} def has_table(self, data_token: DataToken) -> bool: with self._db_adapter:", "in alignment_columns] self._db_adapter.update(data_token, data_store, columns) def upsert( self: Database, data_token: DataToken, data_store: T,", "Database, type: Optional[Type[BaseException]] = None, 
value: Optional[BaseException] = None, traceback: Optional[TracebackType] = None,", "def has_group(self, data_group: str) -> bool: with self._db_adapter: return self._registrar.has_group(data_group) def list_groups(self) ->", "from .db_exceptions import MissingTableError if TYPE_CHECKING: from tanuki.data_store.data_store import DataStore T = TypeVar(\"T\",", "= self._db_adapter.query(data_token, query, columns) store_class: Type[T] = self._registrar.store_type(data_token) metadata_class: Type[M] = self._registrar.metadata_class(data_token) metadata:", "None else None table_data = self._db_adapter.query(data_token, query, columns) store_class: Type[T] = self._registrar.store_type(data_token) metadata_class:", "from .data_token import DataToken from .database_registrar import DatabaseRegistrar from .db_exceptions import MissingTableError if", "data_store, columns) def delete(self: Database, data_token: DataToken, criteria: Query) -> None: with self._db_adapter:", "with self._db_adapter: return self._registrar.list_group_tables(data_group) def query( self: Database, store_type: Type[T], data_token: DataToken, query:", "None: with self._db_adapter: self._registrar.drop_table(data_token) def drop_group(self: Database, data_group: str) -> None: with self._db_adapter:", "delete(self: Database, data_token: DataToken, criteria: Query) -> None: with self._db_adapter: if not self._registrar.has_table(data_token):", "for col_id in col_ids] def table_dtypes(self, data_token: DataToken) -> dict[str, DataType]: with self._db_adapter:", "None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_table( self: Database, source_data_token: DataToken, target_data_token: DataToken,", "TypeVar(\"M\", bound=\"Metadata\") class Database: _db_adapter: DatabaseAdapter _registrar: DatabaseRegistrar def __init__(self, database_adapter: DatabaseAdapter) ->", "from .adapter.database_adapter import DatabaseAdapter from .data_token import DataToken from .database_registrar import DatabaseRegistrar from", "raise MissingTableError(data_token) columns = [str(col) for col in alignment_columns] self._db_adapter.update(data_token, data_store, columns) def", "self._db_adapter: self._registrar.drop_table(data_token) def drop_group(self: Database, data_group: str) -> None: with self._db_adapter: self._registrar.drop_group(data_group) def", "self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) self._db_adapter.delete(data_token, criteria) def drop_table(self: Database, data_token: DataToken)", "import DatabaseRegistrar from .db_exceptions import MissingTableError if TYPE_CHECKING: from tanuki.data_store.data_store import DataStore T", "= database_adapter self._registrar = DatabaseRegistrar(database_adapter) def table_columns(self, data_token: DataToken) -> list[str]: with self._db_adapter:", "[str(col) for col in alignment_columns] self._db_adapter.update(data_token, data_store, columns) def upsert( self: Database, data_token:", "store_class.columns} def has_table(self, data_token: DataToken) -> bool: with self._db_adapter: return self._registrar.has_table(data_token) def list_tables(self)", "upsert( self: Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias], ) -> None: with", "if metadata_class is not None: metadata = self._db_adapter.get_group_table_metadata(data_token, metadata_class) store = store_class.from_rows(table_data, columns=columns,", ") -> None: with self._db_adapter: 
self._registrar.copy_table(source_data_token, target_data_token) def move_group( self: Database, source_data_token: DataToken,", "-> list[str]: with self._db_adapter: col_ids = self._registrar.store_type(data_token).columns return [str(col_id) for col_id in col_ids]", "None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def row_count(self, data_token: DataToken) -> int: with self._db_adapter:", "DatabaseAdapter) -> None: self._db_adapter = database_adapter self._registrar = DatabaseRegistrar(database_adapter) def table_columns(self, data_token: DataToken)", "source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def row_count(self,", "None, value: Optional[BaseException] = None, traceback: Optional[TracebackType] = None, ) -> None: self.stop()", "move_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token,", "def __exit__( self: Database, type: Optional[Type[BaseException]] = None, value: Optional[BaseException] = None, traceback:", "Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def", "if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, store_type) def insert( self: Database, data_token: DataToken, data_store: T", "not self.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in columns] if columns", "target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def copy_group( self: Database,", "self.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in columns] if columns is", "Database, data_group: str) -> None: with self._db_adapter: self._registrar.drop_group(data_group) def copy_table( self: Database, source_data_token:", "self._registrar.list_groups() def list_group_tables(self, data_group: str) -> list[DataToken]: with self._db_adapter: return self._registrar.list_group_tables(data_group) def query(", "raise MissingTableError(data_token) columns = [str(col) for col in columns] if columns is not", "with self._db_adapter: if not self.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in", "not self._registrar.has_table(data_token): self._registrar.create_table(data_token, store_type) def insert( self: Database, data_token: DataToken, data_store: T )", "= TypeVar(\"T\", bound=\"DataStore\") M = TypeVar(\"M\", bound=\"Metadata\") class Database: _db_adapter: DatabaseAdapter _registrar: DatabaseRegistrar", "self._registrar.metadata_class(data_token) metadata: Optional[M] = None if metadata_class is not None: metadata = self._db_adapter.get_group_table_metadata(data_token,", "def list_group_tables(self, data_group: str) -> list[DataToken]: with self._db_adapter: return self._registrar.list_group_tables(data_group) def query( self:", "columns=columns, metadata=metadata) return cast(store_type, store) def create_table(self: Database, data_token: DataToken, store_type: Type[T]) ->", "in alignment_columns] self._db_adapter.upsert(data_token, data_store, columns) def delete(self: Database, data_token: DataToken, criteria: Query) ->", "has_table(self, data_token: DataToken) -> bool: with self._db_adapter: return 
self._registrar.has_table(data_token) def list_tables(self) -> list[DataToken]:", "with self._db_adapter: self._registrar.drop_group(data_group) def copy_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) ->", "-> list[DataToken]: with self._db_adapter: return self._registrar.list_tables() def has_group(self, data_group: str) -> bool: with", "if not self._registrar.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in alignment_columns] self._db_adapter.upsert(data_token,", "def create_table(self: Database, data_token: DataToken, store_type: Type[T]) -> None: with self._db_adapter: if not", "columns) store_class: Type[T] = self._registrar.store_type(data_token) metadata_class: Type[M] = self._registrar.metadata_class(data_token) metadata: Optional[M] = None", "list[str]: with self._db_adapter: col_ids = self._registrar.store_type(data_token).columns return [str(col_id) for col_id in col_ids] def", "self._db_adapter.upsert(data_token, data_store, columns) def delete(self: Database, data_token: DataToken, criteria: Query) -> None: with", "with self._db_adapter: return self._registrar.list_groups() def list_group_tables(self, data_group: str) -> list[DataToken]: with self._db_adapter: return", "data_token: DataToken, data_store: T ) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token,", "T: with self._db_adapter: if not self.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col", "table_data = self._db_adapter.query(data_token, query, columns) store_class: Type[T] = self._registrar.store_type(data_token) metadata_class: Type[M] = self._registrar.metadata_class(data_token)", "data_group: str) -> bool: with self._db_adapter: return self._registrar.has_group(data_group) def list_groups(self) -> set[str]: with", "if TYPE_CHECKING: from tanuki.data_store.data_store import DataStore T = TypeVar(\"T\", bound=\"DataStore\") M = TypeVar(\"M\",", "data_token: DataToken, query: Optional[Query] = None, columns: Optional[list[ColumnAlias]] = None, ) -> T:", "self._registrar.copy_table(source_data_token, target_data_token) def row_count(self, data_token: DataToken) -> int: with self._db_adapter: return self._db_adapter.row_count(data_token) def", "self._db_adapter = database_adapter self._registrar = DatabaseRegistrar(database_adapter) def table_columns(self, data_token: DataToken) -> list[str]: with", "None: with self._db_adapter: self._registrar.drop_group(data_group) def copy_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, )", "with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, store_type) def insert( self: Database, data_token: DataToken,", "DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def row_count(self, data_token:", "DatabaseRegistrar def __init__(self, database_adapter: DatabaseAdapter) -> None: self._db_adapter = database_adapter self._registrar = DatabaseRegistrar(database_adapter)", "bound=\"DataStore\") M = TypeVar(\"M\", bound=\"Metadata\") class Database: _db_adapter: DatabaseAdapter _registrar: DatabaseRegistrar def __init__(self,", "self._db_adapter.insert(data_token, data_store) def update( self: Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias], )", "return self._registrar.list_groups() def 
list_group_tables(self, data_group: str) -> list[DataToken]: with self._db_adapter: return self._registrar.list_group_tables(data_group) def", "DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def copy_group( self: Database, source_data_token:", "= None, value: Optional[BaseException] = None, traceback: Optional[TracebackType] = None, ) -> None:", "not self._registrar.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in alignment_columns] self._db_adapter.update(data_token, data_store,", "TypeVar(\"T\", bound=\"DataStore\") M = TypeVar(\"M\", bound=\"Metadata\") class Database: _db_adapter: DatabaseAdapter _registrar: DatabaseRegistrar def", "return cast(store_type, store) def create_table(self: Database, data_token: DataToken, store_type: Type[T]) -> None: with", "Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias], ) -> None: with self._db_adapter: if", "raise MissingTableError(data_token) columns = [str(col) for col in alignment_columns] self._db_adapter.upsert(data_token, data_store, columns) def", "self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in alignment_columns]", "M = TypeVar(\"M\", bound=\"Metadata\") class Database: _db_adapter: DatabaseAdapter _registrar: DatabaseRegistrar def __init__(self, database_adapter:", "self._db_adapter: if not self.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in columns]", "not None: metadata = self._db_adapter.get_group_table_metadata(data_token, metadata_class) store = store_class.from_rows(table_data, columns=columns, metadata=metadata) return cast(store_type,", "return self._db_adapter.row_count(data_token) def __enter__(self: Database) -> Database: return self def __exit__( self: Database,", "def drop_group(self: Database, data_group: str) -> None: with self._db_adapter: self._registrar.drop_group(data_group) def copy_table( self:", "source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_group(", "-> Database: return self def __exit__( self: Database, type: Optional[Type[BaseException]] = None, value:", "store) def create_table(self: Database, data_token: DataToken, store_type: Type[T]) -> None: with self._db_adapter: if", "for col in alignment_columns] self._db_adapter.upsert(data_token, data_store, columns) def delete(self: Database, data_token: DataToken, criteria:", "self._registrar.store_type(data_token) metadata_class: Type[M] = self._registrar.metadata_class(data_token) metadata: Optional[M] = None if metadata_class is not", "Metadata from tanuki.data_store.query import Query from .adapter.database_adapter import DatabaseAdapter from .data_token import DataToken", "-> list[DataToken]: with self._db_adapter: return self._registrar.list_group_tables(data_group) def query( self: Database, store_type: Type[T], data_token:", "self._registrar.copy_table(source_data_token, target_data_token) def copy_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None:", "not None else None table_data = self._db_adapter.query(data_token, query, columns) store_class: Type[T] = self._registrar.store_type(data_token)", "class Database: _db_adapter: DatabaseAdapter _registrar: DatabaseRegistrar def __init__(self, database_adapter: DatabaseAdapter) -> None: 
self._db_adapter", "self._registrar.copy_table(source_data_token, target_data_token) def move_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None:", "= self._db_adapter.get_group_table_metadata(data_token, metadata_class) store = store_class.from_rows(table_data, columns=columns, metadata=metadata) return cast(store_type, store) def create_table(self:", "return {column.name: column.dtype for column in store_class.columns} def has_table(self, data_token: DataToken) -> bool:", "= None, traceback: Optional[TracebackType] = None, ) -> None: self.stop() def stop(self) ->", "with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in", "bound=\"Metadata\") class Database: _db_adapter: DatabaseAdapter _registrar: DatabaseRegistrar def __init__(self, database_adapter: DatabaseAdapter) -> None:", "target_data_token) def copy_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with", "Type[M] = self._registrar.metadata_class(data_token) metadata: Optional[M] = None if metadata_class is not None: metadata", "from tanuki.data_store.query import Query from .adapter.database_adapter import DatabaseAdapter from .data_token import DataToken from", "return [str(col_id) for col_id in col_ids] def table_dtypes(self, data_token: DataToken) -> dict[str, DataType]:", "copy_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token,", "def drop_table(self: Database, data_token: DataToken) -> None: with self._db_adapter: self._registrar.drop_table(data_token) def drop_group(self: Database,", "from .database_registrar import DatabaseRegistrar from .db_exceptions import MissingTableError if TYPE_CHECKING: from tanuki.data_store.data_store import", "value: Optional[BaseException] = None, traceback: Optional[TracebackType] = None, ) -> None: self.stop() def", "self._db_adapter.get_group_table_metadata(data_token, metadata_class) store = store_class.from_rows(table_data, columns=columns, metadata=metadata) return cast(store_type, store) def create_table(self: Database,", "set[str]: with self._db_adapter: return self._registrar.list_groups() def list_group_tables(self, data_group: str) -> list[DataToken]: with self._db_adapter:", "Query) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) self._db_adapter.delete(data_token, criteria) def", "self._db_adapter: return self._registrar.has_table(data_token) def list_tables(self) -> list[DataToken]: with self._db_adapter: return self._registrar.list_tables() def has_group(self,", "Database: _db_adapter: DatabaseAdapter _registrar: DatabaseRegistrar def __init__(self, database_adapter: DatabaseAdapter) -> None: self._db_adapter =", "def copy_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter:", "data_token: DataToken) -> bool: with self._db_adapter: return self._registrar.has_table(data_token) def list_tables(self) -> list[DataToken]: with", "self._registrar.has_table(data_token) def list_tables(self) -> list[DataToken]: with self._db_adapter: return self._registrar.list_tables() def has_group(self, data_group: str)", "self._db_adapter.update(data_token, data_store, columns) def upsert( self: Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias],", "-> None: with 
self._db_adapter: self._registrar.drop_table(data_token) def drop_group(self: Database, data_group: str) -> None: with", "MissingTableError(data_token) columns = [str(col) for col in columns] if columns is not None", "DataToken, query: Optional[Query] = None, columns: Optional[list[ColumnAlias]] = None, ) -> T: with", "data_token: DataToken, store_type: Type[T]) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, store_type)", "TypeVar from tanuki.data_store.column_alias import ColumnAlias from tanuki.data_store.data_type import DataType from tanuki.data_store.metadata import Metadata", "data_group: str) -> None: with self._db_adapter: self._registrar.drop_group(data_group) def copy_table( self: Database, source_data_token: DataToken,", "-> None: self._db_adapter = database_adapter self._registrar = DatabaseRegistrar(database_adapter) def table_columns(self, data_token: DataToken) ->", "type: Optional[Type[BaseException]] = None, value: Optional[BaseException] = None, traceback: Optional[TracebackType] = None, )", "self._db_adapter: self._registrar.drop_group(data_group) def copy_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None:", "create_table(self: Database, data_token: DataToken, store_type: Type[T]) -> None: with self._db_adapter: if not self._registrar.has_table(data_token):", "-> None: with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) self._db_adapter.delete(data_token, criteria) def drop_table(self:", "self._registrar.create_table(data_token, store_type) def insert( self: Database, data_token: DataToken, data_store: T ) -> None:", "self._db_adapter: return self._registrar.list_group_tables(data_group) def query( self: Database, store_type: Type[T], data_token: DataToken, query: Optional[Query]", "import Any, cast, Optional, Type, TYPE_CHECKING, TypeVar from tanuki.data_store.column_alias import ColumnAlias from tanuki.data_store.data_type", "= [str(col) for col in alignment_columns] self._db_adapter.update(data_token, data_store, columns) def upsert( self: Database,", "metadata_class) store = store_class.from_rows(table_data, columns=columns, metadata=metadata) return cast(store_type, store) def create_table(self: Database, data_token:", ") -> T: with self._db_adapter: if not self.has_table(data_token): raise MissingTableError(data_token) columns = [str(col)", "None, ) -> T: with self._db_adapter: if not self.has_table(data_token): raise MissingTableError(data_token) columns =", "DataToken) -> dict[str, DataType]: with self._db_adapter: store_class = self._registrar.store_type(data_token) return {column.name: column.dtype for", "cast(store_type, store) def create_table(self: Database, data_token: DataToken, store_type: Type[T]) -> None: with self._db_adapter:", "self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) ->", "self._db_adapter: store_class = self._registrar.store_type(data_token) return {column.name: column.dtype for column in store_class.columns} def has_table(self,", "with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, )", "columns) def upsert( self: Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias], ) ->", "DataType from 
tanuki.data_store.metadata import Metadata from tanuki.data_store.query import Query from .adapter.database_adapter import DatabaseAdapter", "self._db_adapter: return self._registrar.list_groups() def list_group_tables(self, data_group: str) -> list[DataToken]: with self._db_adapter: return self._registrar.list_group_tables(data_group)", "from tanuki.data_store.metadata import Metadata from tanuki.data_store.query import Query from .adapter.database_adapter import DatabaseAdapter from", "dict[str, DataType]: with self._db_adapter: store_class = self._registrar.store_type(data_token) return {column.name: column.dtype for column in", "data_token: DataToken, criteria: Query) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token)", "data_store, columns) def upsert( self: Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias], )", "with self._db_adapter: return self._db_adapter.row_count(data_token) def __enter__(self: Database) -> Database: return self def __exit__(", "copy_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token,", "= TypeVar(\"M\", bound=\"Metadata\") class Database: _db_adapter: DatabaseAdapter _registrar: DatabaseRegistrar def __init__(self, database_adapter: DatabaseAdapter)", "Any, cast, Optional, Type, TYPE_CHECKING, TypeVar from tanuki.data_store.column_alias import ColumnAlias from tanuki.data_store.data_type import", "self._registrar.create_table(data_token, data_store.__class__) self._db_adapter.insert(data_token, data_store) def update( self: Database, data_token: DataToken, data_store: T, alignment_columns:", "row_count(self, data_token: DataToken) -> int: with self._db_adapter: return self._db_adapter.row_count(data_token) def __enter__(self: Database) ->", "def __init__(self, database_adapter: DatabaseAdapter) -> None: self._db_adapter = database_adapter self._registrar = DatabaseRegistrar(database_adapter) def", "with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, data_store.__class__) self._db_adapter.insert(data_token, data_store) def update( self: Database,", "col in alignment_columns] self._db_adapter.upsert(data_token, data_store, columns) def delete(self: Database, data_token: DataToken, criteria: Query)", "not self._registrar.has_table(data_token): raise MissingTableError(data_token) self._db_adapter.delete(data_token, criteria) def drop_table(self: Database, data_token: DataToken) -> None:", "-> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def copy_group( self: Database, source_data_token: DataToken, target_data_token:", "Optional[Query] = None, columns: Optional[list[ColumnAlias]] = None, ) -> T: with self._db_adapter: if", "not self._registrar.has_table(data_token): self._registrar.create_table(data_token, data_store.__class__) self._db_adapter.insert(data_token, data_store) def update( self: Database, data_token: DataToken, data_store:", "not self._registrar.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in alignment_columns] self._db_adapter.upsert(data_token, data_store,", "data_store.__class__) self._db_adapter.insert(data_token, data_store) def update( self: Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias],", "bool: with self._db_adapter: return 
self._registrar.has_table(data_token) def list_tables(self) -> list[DataToken]: with self._db_adapter: return self._registrar.list_tables()", "None: metadata = self._db_adapter.get_group_table_metadata(data_token, metadata_class) store = store_class.from_rows(table_data, columns=columns, metadata=metadata) return cast(store_type, store)", "source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def copy_group(", "def update( self: Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias], ) -> None:", "if not self.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in columns] if", "with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def copy_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, )", "annotations from types import TracebackType from typing import Any, cast, Optional, Type, TYPE_CHECKING,", "def list_groups(self) -> set[str]: with self._db_adapter: return self._registrar.list_groups() def list_group_tables(self, data_group: str) ->", "has_group(self, data_group: str) -> bool: with self._db_adapter: return self._registrar.has_group(data_group) def list_groups(self) -> set[str]:", "import Query from .adapter.database_adapter import DatabaseAdapter from .data_token import DataToken from .database_registrar import", "= self._registrar.metadata_class(data_token) metadata: Optional[M] = None if metadata_class is not None: metadata =", "import annotations from types import TracebackType from typing import Any, cast, Optional, Type,", "from __future__ import annotations from types import TracebackType from typing import Any, cast,", "= None, ) -> T: with self._db_adapter: if not self.has_table(data_token): raise MissingTableError(data_token) columns", "import ColumnAlias from tanuki.data_store.data_type import DataType from tanuki.data_store.metadata import Metadata from tanuki.data_store.query import", "self._registrar.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in alignment_columns] self._db_adapter.upsert(data_token, data_store, columns)", "Type[T]) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, store_type) def insert( self:", "col_ids] def table_dtypes(self, data_token: DataToken) -> dict[str, DataType]: with self._db_adapter: store_class = self._registrar.store_type(data_token)", "data_store: T ) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, data_store.__class__) self._db_adapter.insert(data_token,", "self._db_adapter: col_ids = self._registrar.store_type(data_token).columns return [str(col_id) for col_id in col_ids] def table_dtypes(self, data_token:", "str) -> list[DataToken]: with self._db_adapter: return self._registrar.list_group_tables(data_group) def query( self: Database, store_type: Type[T],", "with self._db_adapter: col_ids = self._registrar.store_type(data_token).columns return [str(col_id) for col_id in col_ids] def table_dtypes(self,", "-> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, data_store.__class__) self._db_adapter.insert(data_token, data_store) def update(", "metadata = self._db_adapter.get_group_table_metadata(data_token, metadata_class) store = 
store_class.from_rows(table_data, columns=columns, metadata=metadata) return cast(store_type, store) def", "def move_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter:", "str) -> bool: with self._db_adapter: return self._registrar.has_group(data_group) def list_groups(self) -> set[str]: with self._db_adapter:", "import MissingTableError if TYPE_CHECKING: from tanuki.data_store.data_store import DataStore T = TypeVar(\"T\", bound=\"DataStore\") M", "column in store_class.columns} def has_table(self, data_token: DataToken) -> bool: with self._db_adapter: return self._registrar.has_table(data_token)", "for col in alignment_columns] self._db_adapter.update(data_token, data_store, columns) def upsert( self: Database, data_token: DataToken,", "def upsert( self: Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias], ) -> None:", "None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_group( self: Database, source_data_token: DataToken, target_data_token: DataToken,", "self def __exit__( self: Database, type: Optional[Type[BaseException]] = None, value: Optional[BaseException] = None,", "import TracebackType from typing import Any, cast, Optional, Type, TYPE_CHECKING, TypeVar from tanuki.data_store.column_alias", "DataToken, store_type: Type[T]) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, store_type) def", "columns] if columns is not None else None table_data = self._db_adapter.query(data_token, query, columns)", "metadata_class: Type[M] = self._registrar.metadata_class(data_token) metadata: Optional[M] = None if metadata_class is not None:", "None: with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col", "-> None: with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for", "def copy_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter:", "data_token: DataToken) -> None: with self._db_adapter: self._registrar.drop_table(data_token) def drop_group(self: Database, data_group: str) ->", "-> bool: with self._db_adapter: return self._registrar.has_table(data_token) def list_tables(self) -> list[DataToken]: with self._db_adapter: return", "list_tables(self) -> list[DataToken]: with self._db_adapter: return self._registrar.list_tables() def has_group(self, data_group: str) -> bool:", "Optional, Type, TYPE_CHECKING, TypeVar from tanuki.data_store.column_alias import ColumnAlias from tanuki.data_store.data_type import DataType from", "None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, store_type) def insert( self: Database, data_token:", "self: Database, type: Optional[Type[BaseException]] = None, value: Optional[BaseException] = None, traceback: Optional[TracebackType] =", "col_ids = self._registrar.store_type(data_token).columns return [str(col_id) for col_id in col_ids] def table_dtypes(self, data_token: DataToken)", "= None, columns: Optional[list[ColumnAlias]] = None, ) -> T: with self._db_adapter: if not", "str) -> None: with self._db_adapter: self._registrar.drop_group(data_group) def copy_table( self: Database, source_data_token: DataToken, target_data_token:", "source_data_token: DataToken, target_data_token: 
DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_table(", "-> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_group( self: Database, source_data_token: DataToken, target_data_token:", "from tanuki.data_store.column_alias import ColumnAlias from tanuki.data_store.data_type import DataType from tanuki.data_store.metadata import Metadata from", "-> T: with self._db_adapter: if not self.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for", "None: with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) self._db_adapter.delete(data_token, criteria) def drop_table(self: Database,", "for column in store_class.columns} def has_table(self, data_token: DataToken) -> bool: with self._db_adapter: return", "<filename>src/tanuki/database/database.py from __future__ import annotations from types import TracebackType from typing import Any,", "store_type) def insert( self: Database, data_token: DataToken, data_store: T ) -> None: with", "is not None else None table_data = self._db_adapter.query(data_token, query, columns) store_class: Type[T] =", "with self._db_adapter: return self._registrar.list_tables() def has_group(self, data_group: str) -> bool: with self._db_adapter: return", "self: Database, data_token: DataToken, data_store: T ) -> None: with self._db_adapter: if not", "with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, )", "__future__ import annotations from types import TracebackType from typing import Any, cast, Optional,", "query: Optional[Query] = None, columns: Optional[list[ColumnAlias]] = None, ) -> T: with self._db_adapter:", ") -> None: with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) columns = [str(col)", "if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, data_store.__class__) self._db_adapter.insert(data_token, data_store) def update( self: Database, data_token: DataToken,", "return self._registrar.has_group(data_group) def list_groups(self) -> set[str]: with self._db_adapter: return self._registrar.list_groups() def list_group_tables(self, data_group:", ".adapter.database_adapter import DatabaseAdapter from .data_token import DataToken from .database_registrar import DatabaseRegistrar from .db_exceptions", "cast, Optional, Type, TYPE_CHECKING, TypeVar from tanuki.data_store.column_alias import ColumnAlias from tanuki.data_store.data_type import DataType", "tanuki.data_store.column_alias import ColumnAlias from tanuki.data_store.data_type import DataType from tanuki.data_store.metadata import Metadata from tanuki.data_store.query", "-> bool: with self._db_adapter: return self._registrar.has_group(data_group) def list_groups(self) -> set[str]: with self._db_adapter: return", "tanuki.data_store.data_store import DataStore T = TypeVar(\"T\", bound=\"DataStore\") M = TypeVar(\"M\", bound=\"Metadata\") class Database:", "Database, data_token: DataToken, store_type: Type[T]) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token,", "in columns] if columns is not None else None table_data = self._db_adapter.query(data_token, query,", "store_class.from_rows(table_data, columns=columns, metadata=metadata) return 
cast(store_type, store) def create_table(self: Database, data_token: DataToken, store_type: Type[T])", "-> set[str]: with self._db_adapter: return self._registrar.list_groups() def list_group_tables(self, data_group: str) -> list[DataToken]: with", "update( self: Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias], ) -> None: with", "criteria) def drop_table(self: Database, data_token: DataToken) -> None: with self._db_adapter: self._registrar.drop_table(data_token) def drop_group(self:", "def query( self: Database, store_type: Type[T], data_token: DataToken, query: Optional[Query] = None, columns:", "Database, store_type: Type[T], data_token: DataToken, query: Optional[Query] = None, columns: Optional[list[ColumnAlias]] = None,", "Database: return self def __exit__( self: Database, type: Optional[Type[BaseException]] = None, value: Optional[BaseException]", "DataToken from .database_registrar import DatabaseRegistrar from .db_exceptions import MissingTableError if TYPE_CHECKING: from tanuki.data_store.data_store", "typing import Any, cast, Optional, Type, TYPE_CHECKING, TypeVar from tanuki.data_store.column_alias import ColumnAlias from", "int: with self._db_adapter: return self._db_adapter.row_count(data_token) def __enter__(self: Database) -> Database: return self def", "in store_class.columns} def has_table(self, data_token: DataToken) -> bool: with self._db_adapter: return self._registrar.has_table(data_token) def", "def insert( self: Database, data_token: DataToken, data_store: T ) -> None: with self._db_adapter:", "store = store_class.from_rows(table_data, columns=columns, metadata=metadata) return cast(store_type, store) def create_table(self: Database, data_token: DataToken,", "Database, data_token: DataToken, data_store: T ) -> None: with self._db_adapter: if not self._registrar.has_table(data_token):", "drop_table(self: Database, data_token: DataToken) -> None: with self._db_adapter: self._registrar.drop_table(data_token) def drop_group(self: Database, data_group:", "def table_columns(self, data_token: DataToken) -> list[str]: with self._db_adapter: col_ids = self._registrar.store_type(data_token).columns return [str(col_id)", "store_class: Type[T] = self._registrar.store_type(data_token) metadata_class: Type[M] = self._registrar.metadata_class(data_token) metadata: Optional[M] = None if", "list_group_tables(self, data_group: str) -> list[DataToken]: with self._db_adapter: return self._registrar.list_group_tables(data_group) def query( self: Database,", "if not self._registrar.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in alignment_columns] self._db_adapter.update(data_token,", "return self._registrar.list_tables() def has_group(self, data_group: str) -> bool: with self._db_adapter: return self._registrar.has_group(data_group) def", "[str(col) for col in alignment_columns] self._db_adapter.upsert(data_token, data_store, columns) def delete(self: Database, data_token: DataToken,", "tanuki.data_store.data_type import DataType from tanuki.data_store.metadata import Metadata from tanuki.data_store.query import Query from .adapter.database_adapter", ") -> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, data_store.__class__) self._db_adapter.insert(data_token, data_store) def", "DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_table( self: Database, source_data_token:", 
"import DataStore T = TypeVar(\"T\", bound=\"DataStore\") M = TypeVar(\"M\", bound=\"Metadata\") class Database: _db_adapter:", "Database, data_token: DataToken, criteria: Query) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): raise", "in col_ids] def table_dtypes(self, data_token: DataToken) -> dict[str, DataType]: with self._db_adapter: store_class =", "Type[T], data_token: DataToken, query: Optional[Query] = None, columns: Optional[list[ColumnAlias]] = None, ) ->", "DataToken) -> bool: with self._db_adapter: return self._registrar.has_table(data_token) def list_tables(self) -> list[DataToken]: with self._db_adapter:", "self._registrar = DatabaseRegistrar(database_adapter) def table_columns(self, data_token: DataToken) -> list[str]: with self._db_adapter: col_ids =", "__exit__( self: Database, type: Optional[Type[BaseException]] = None, value: Optional[BaseException] = None, traceback: Optional[TracebackType]", "self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, data_store.__class__) self._db_adapter.insert(data_token, data_store) def update( self: Database, data_token:", "self._registrar.copy_table(source_data_token, target_data_token) def move_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None:", "= store_class.from_rows(table_data, columns=columns, metadata=metadata) return cast(store_type, store) def create_table(self: Database, data_token: DataToken, store_type:", "columns) def delete(self: Database, data_token: DataToken, criteria: Query) -> None: with self._db_adapter: if", "tanuki.data_store.query import Query from .adapter.database_adapter import DatabaseAdapter from .data_token import DataToken from .database_registrar", "insert( self: Database, data_token: DataToken, data_store: T ) -> None: with self._db_adapter: if", "data_store) def update( self: Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias], ) ->", "tanuki.data_store.metadata import Metadata from tanuki.data_store.query import Query from .adapter.database_adapter import DatabaseAdapter from .data_token", "self._registrar.has_table(data_token): raise MissingTableError(data_token) self._db_adapter.delete(data_token, criteria) def drop_table(self: Database, data_token: DataToken) -> None: with", "Type, TYPE_CHECKING, TypeVar from tanuki.data_store.column_alias import ColumnAlias from tanuki.data_store.data_type import DataType from tanuki.data_store.metadata", "alignment_columns] self._db_adapter.upsert(data_token, data_store, columns) def delete(self: Database, data_token: DataToken, criteria: Query) -> None:", "self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, store_type) def insert( self: Database, data_token: DataToken, data_store:", "table_columns(self, data_token: DataToken) -> list[str]: with self._db_adapter: col_ids = self._registrar.store_type(data_token).columns return [str(col_id) for", "with self._db_adapter: return self._registrar.has_table(data_token) def list_tables(self) -> list[DataToken]: with self._db_adapter: return self._registrar.list_tables() def", "Optional[M] = None if metadata_class is not None: metadata = self._db_adapter.get_group_table_metadata(data_token, metadata_class) store", "def table_dtypes(self, data_token: DataToken) -> dict[str, DataType]: with self._db_adapter: store_class = self._registrar.store_type(data_token) return", "self._db_adapter: 
self._registrar.copy_table(source_data_token, target_data_token) def copy_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) ->", "list[ColumnAlias], ) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) columns =", "TYPE_CHECKING: from tanuki.data_store.data_store import DataStore T = TypeVar(\"T\", bound=\"DataStore\") M = TypeVar(\"M\", bound=\"Metadata\")", "with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) self._db_adapter.delete(data_token, criteria) def drop_table(self: Database, data_token:", "import DataToken from .database_registrar import DatabaseRegistrar from .db_exceptions import MissingTableError if TYPE_CHECKING: from", "DataToken, data_store: T, alignment_columns: list[ColumnAlias], ) -> None: with self._db_adapter: if not self._registrar.has_table(data_token):", "DataStore T = TypeVar(\"T\", bound=\"DataStore\") M = TypeVar(\"M\", bound=\"Metadata\") class Database: _db_adapter: DatabaseAdapter", "store_type: Type[T]) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, store_type) def insert(", "database_adapter self._registrar = DatabaseRegistrar(database_adapter) def table_columns(self, data_token: DataToken) -> list[str]: with self._db_adapter: col_ids", "metadata_class is not None: metadata = self._db_adapter.get_group_table_metadata(data_token, metadata_class) store = store_class.from_rows(table_data, columns=columns, metadata=metadata)", "list[DataToken]: with self._db_adapter: return self._registrar.list_group_tables(data_group) def query( self: Database, store_type: Type[T], data_token: DataToken,", "for col in columns] if columns is not None else None table_data =", "Database, data_token: DataToken) -> None: with self._db_adapter: self._registrar.drop_table(data_token) def drop_group(self: Database, data_group: str)", "self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) ->", "self._db_adapter.row_count(data_token) def __enter__(self: Database) -> Database: return self def __exit__( self: Database, type:", "DatabaseAdapter _registrar: DatabaseRegistrar def __init__(self, database_adapter: DatabaseAdapter) -> None: self._db_adapter = database_adapter self._registrar", "columns = [str(col) for col in alignment_columns] self._db_adapter.update(data_token, data_store, columns) def upsert( self:", "data_token: DataToken) -> int: with self._db_adapter: return self._db_adapter.row_count(data_token) def __enter__(self: Database) -> Database:", "T ) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, data_store.__class__) self._db_adapter.insert(data_token, data_store)", "query, columns) store_class: Type[T] = self._registrar.store_type(data_token) metadata_class: Type[M] = self._registrar.metadata_class(data_token) metadata: Optional[M] =", "with self._db_adapter: self._registrar.drop_table(data_token) def drop_group(self: Database, data_group: str) -> None: with self._db_adapter: self._registrar.drop_group(data_group)", "table_dtypes(self, data_token: DataToken) -> dict[str, DataType]: with self._db_adapter: store_class = self._registrar.store_type(data_token) return {column.name:", "from tanuki.data_store.data_store import DataStore T = TypeVar(\"T\", 
bound=\"DataStore\") M = TypeVar(\"M\", bound=\"Metadata\") class", "None, columns: Optional[list[ColumnAlias]] = None, ) -> T: with self._db_adapter: if not self.has_table(data_token):", "return self._registrar.has_table(data_token) def list_tables(self) -> list[DataToken]: with self._db_adapter: return self._registrar.list_tables() def has_group(self, data_group:", "def move_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter:", "col_id in col_ids] def table_dtypes(self, data_token: DataToken) -> dict[str, DataType]: with self._db_adapter: store_class", "def has_table(self, data_token: DataToken) -> bool: with self._db_adapter: return self._registrar.has_table(data_token) def list_tables(self) ->", "with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def row_count(self, data_token: DataToken) -> int: with self._db_adapter: return", "self: Database, store_type: Type[T], data_token: DataToken, query: Optional[Query] = None, columns: Optional[list[ColumnAlias]] =", "DatabaseRegistrar(database_adapter) def table_columns(self, data_token: DataToken) -> list[str]: with self._db_adapter: col_ids = self._registrar.store_type(data_token).columns return", "self._db_adapter: return self._registrar.has_group(data_group) def list_groups(self) -> set[str]: with self._db_adapter: return self._registrar.list_groups() def list_group_tables(self,", "def row_count(self, data_token: DataToken) -> int: with self._db_adapter: return self._db_adapter.row_count(data_token) def __enter__(self: Database)", "from types import TracebackType from typing import Any, cast, Optional, Type, TYPE_CHECKING, TypeVar", "self._registrar.store_type(data_token) return {column.name: column.dtype for column in store_class.columns} def has_table(self, data_token: DataToken) ->", "-> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_table( self: Database, source_data_token: DataToken, target_data_token:", "-> int: with self._db_adapter: return self._db_adapter.row_count(data_token) def __enter__(self: Database) -> Database: return self", "self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def row_count(self, data_token: DataToken) -> int: with self._db_adapter: return self._db_adapter.row_count(data_token)", "else None table_data = self._db_adapter.query(data_token, query, columns) store_class: Type[T] = self._registrar.store_type(data_token) metadata_class: Type[M]", "data_token: DataToken) -> list[str]: with self._db_adapter: col_ids = self._registrar.store_type(data_token).columns return [str(col_id) for col_id", "__enter__(self: Database) -> Database: return self def __exit__( self: Database, type: Optional[Type[BaseException]] =", "None if metadata_class is not None: metadata = self._db_adapter.get_group_table_metadata(data_token, metadata_class) store = store_class.from_rows(table_data,", "DatabaseRegistrar from .db_exceptions import MissingTableError if TYPE_CHECKING: from tanuki.data_store.data_store import DataStore T =", "traceback: Optional[TracebackType] = None, ) -> None: self.stop() def stop(self) -> None: self._db_adapter.stop()", "= [str(col) for col in columns] if columns is not None else None", "self: Database, data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias], ) -> None: with self._db_adapter:", "criteria: Query) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): raise 
MissingTableError(data_token) self._db_adapter.delete(data_token, criteria)", "DataToken) -> list[str]: with self._db_adapter: col_ids = self._registrar.store_type(data_token).columns return [str(col_id) for col_id in", "target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def row_count(self, data_token: DataToken)", "MissingTableError(data_token) self._db_adapter.delete(data_token, criteria) def drop_table(self: Database, data_token: DataToken) -> None: with self._db_adapter: self._registrar.drop_table(data_token)", ".database_registrar import DatabaseRegistrar from .db_exceptions import MissingTableError if TYPE_CHECKING: from tanuki.data_store.data_store import DataStore", "ColumnAlias from tanuki.data_store.data_type import DataType from tanuki.data_store.metadata import Metadata from tanuki.data_store.query import Query", "data_token: DataToken, data_store: T, alignment_columns: list[ColumnAlias], ) -> None: with self._db_adapter: if not", "T = TypeVar(\"T\", bound=\"DataStore\") M = TypeVar(\"M\", bound=\"Metadata\") class Database: _db_adapter: DatabaseAdapter _registrar:", "DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def copy_group( self:", "alignment_columns] self._db_adapter.update(data_token, data_store, columns) def upsert( self: Database, data_token: DataToken, data_store: T, alignment_columns:", "target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_table( self: Database,", "bool: with self._db_adapter: return self._registrar.has_group(data_group) def list_groups(self) -> set[str]: with self._db_adapter: return self._registrar.list_groups()", "columns = [str(col) for col in columns] if columns is not None else", "= self._registrar.store_type(data_token).columns return [str(col_id) for col_id in col_ids] def table_dtypes(self, data_token: DataToken) ->", "self._db_adapter: return self._registrar.list_tables() def has_group(self, data_group: str) -> bool: with self._db_adapter: return self._registrar.has_group(data_group)", "= self._registrar.store_type(data_token) metadata_class: Type[M] = self._registrar.metadata_class(data_token) metadata: Optional[M] = None if metadata_class is", "MissingTableError if TYPE_CHECKING: from tanuki.data_store.data_store import DataStore T = TypeVar(\"T\", bound=\"DataStore\") M =", "def __enter__(self: Database) -> Database: return self def __exit__( self: Database, type: Optional[Type[BaseException]]", "import DataType from tanuki.data_store.metadata import Metadata from tanuki.data_store.query import Query from .adapter.database_adapter import", "self._registrar.has_table(data_token): self._registrar.create_table(data_token, store_type) def insert( self: Database, data_token: DataToken, data_store: T ) ->", "drop_group(self: Database, data_group: str) -> None: with self._db_adapter: self._registrar.drop_group(data_group) def copy_table( self: Database,", "is not None: metadata = self._db_adapter.get_group_table_metadata(data_token, metadata_class) store = store_class.from_rows(table_data, columns=columns, metadata=metadata) return", "Optional[Type[BaseException]] = None, value: Optional[BaseException] = None, traceback: Optional[TracebackType] = None, ) ->", "Database) -> Database: return self def __exit__( self: Database, type: Optional[Type[BaseException]] = None,", "None, traceback: 
Optional[TracebackType] = None, ) -> None: self.stop() def stop(self) -> None:", "self._db_adapter: return self._db_adapter.row_count(data_token) def __enter__(self: Database) -> Database: return self def __exit__( self:", "self._registrar.has_group(data_group) def list_groups(self) -> set[str]: with self._db_adapter: return self._registrar.list_groups() def list_group_tables(self, data_group: str)", "None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, data_store.__class__) self._db_adapter.insert(data_token, data_store) def update( self:", ".data_token import DataToken from .database_registrar import DatabaseRegistrar from .db_exceptions import MissingTableError if TYPE_CHECKING:", "[str(col) for col in columns] if columns is not None else None table_data", "= DatabaseRegistrar(database_adapter) def table_columns(self, data_token: DataToken) -> list[str]: with self._db_adapter: col_ids = self._registrar.store_type(data_token).columns", "DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def row_count(self, data_token: DataToken) ->", "self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token)", "target_data_token) def row_count(self, data_token: DataToken) -> int: with self._db_adapter: return self._db_adapter.row_count(data_token) def __enter__(self:", "_db_adapter: DatabaseAdapter _registrar: DatabaseRegistrar def __init__(self, database_adapter: DatabaseAdapter) -> None: self._db_adapter = database_adapter", "with self._db_adapter: store_class = self._registrar.store_type(data_token) return {column.name: column.dtype for column in store_class.columns} def", "-> None: with self._db_adapter: self._registrar.drop_group(data_group) def copy_table( self: Database, source_data_token: DataToken, target_data_token: DataToken,", "self._registrar.store_type(data_token).columns return [str(col_id) for col_id in col_ids] def table_dtypes(self, data_token: DataToken) -> dict[str,", "_registrar: DatabaseRegistrar def __init__(self, database_adapter: DatabaseAdapter) -> None: self._db_adapter = database_adapter self._registrar =", "query( self: Database, store_type: Type[T], data_token: DataToken, query: Optional[Query] = None, columns: Optional[list[ColumnAlias]]", "DataType]: with self._db_adapter: store_class = self._registrar.store_type(data_token) return {column.name: column.dtype for column in store_class.columns}", "= [str(col) for col in alignment_columns] self._db_adapter.upsert(data_token, data_store, columns) def delete(self: Database, data_token:", "columns: Optional[list[ColumnAlias]] = None, ) -> T: with self._db_adapter: if not self.has_table(data_token): raise", "DataToken) -> int: with self._db_adapter: return self._db_adapter.row_count(data_token) def __enter__(self: Database) -> Database: return", "self._registrar.list_tables() def has_group(self, data_group: str) -> bool: with self._db_adapter: return self._registrar.has_group(data_group) def list_groups(self)", "from typing import Any, cast, Optional, Type, TYPE_CHECKING, TypeVar from tanuki.data_store.column_alias import ColumnAlias", ".db_exceptions import MissingTableError if TYPE_CHECKING: from tanuki.data_store.data_store import DataStore T = TypeVar(\"T\", bound=\"DataStore\")", "MissingTableError(data_token) columns = [str(col) for col in alignment_columns] 
self._db_adapter.update(data_token, data_store, columns) def upsert(", "database_adapter: DatabaseAdapter) -> None: self._db_adapter = database_adapter self._registrar = DatabaseRegistrar(database_adapter) def table_columns(self, data_token:", "data_group: str) -> list[DataToken]: with self._db_adapter: return self._registrar.list_group_tables(data_group) def query( self: Database, store_type:", "return self._registrar.list_group_tables(data_group) def query( self: Database, store_type: Type[T], data_token: DataToken, query: Optional[Query] =", "DataToken, data_store: T ) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, data_store.__class__)", "types import TracebackType from typing import Any, cast, Optional, Type, TYPE_CHECKING, TypeVar from", "TYPE_CHECKING, TypeVar from tanuki.data_store.column_alias import ColumnAlias from tanuki.data_store.data_type import DataType from tanuki.data_store.metadata import", "-> None: with self._db_adapter: if not self._registrar.has_table(data_token): self._registrar.create_table(data_token, store_type) def insert( self: Database,", "def delete(self: Database, data_token: DataToken, criteria: Query) -> None: with self._db_adapter: if not", "import DatabaseAdapter from .data_token import DataToken from .database_registrar import DatabaseRegistrar from .db_exceptions import", "col in alignment_columns] self._db_adapter.update(data_token, data_store, columns) def upsert( self: Database, data_token: DataToken, data_store:", "store_class = self._registrar.store_type(data_token) return {column.name: column.dtype for column in store_class.columns} def has_table(self, data_token:", "-> dict[str, DataType]: with self._db_adapter: store_class = self._registrar.store_type(data_token) return {column.name: column.dtype for column", "self._registrar.has_table(data_token): self._registrar.create_table(data_token, data_store.__class__) self._db_adapter.insert(data_token, data_store) def update( self: Database, data_token: DataToken, data_store: T,", "target_data_token) def move_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with", "self._registrar.list_group_tables(data_group) def query( self: Database, store_type: Type[T], data_token: DataToken, query: Optional[Query] = None,", "DataToken) -> None: with self._db_adapter: self._registrar.drop_table(data_token) def drop_group(self: Database, data_group: str) -> None:", "if not self._registrar.has_table(data_token): raise MissingTableError(data_token) self._db_adapter.delete(data_token, criteria) def drop_table(self: Database, data_token: DataToken) ->", "self._registrar.has_table(data_token): raise MissingTableError(data_token) columns = [str(col) for col in alignment_columns] self._db_adapter.update(data_token, data_store, columns)", "self._registrar.drop_group(data_group) def copy_table( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with", "def list_tables(self) -> list[DataToken]: with self._db_adapter: return self._registrar.list_tables() def has_group(self, data_group: str) ->", "data_store: T, alignment_columns: list[ColumnAlias], ) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): raise", "move_group( self: Database, source_data_token: DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token,", "alignment_columns: list[ColumnAlias], ) -> None: with self._db_adapter: 
if not self._registrar.has_table(data_token): raise MissingTableError(data_token) columns", ") -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def copy_group( self: Database, source_data_token: DataToken,", "if columns is not None else None table_data = self._db_adapter.query(data_token, query, columns) store_class:", "list[DataToken]: with self._db_adapter: return self._registrar.list_tables() def has_group(self, data_group: str) -> bool: with self._db_adapter:", "TracebackType from typing import Any, cast, Optional, Type, TYPE_CHECKING, TypeVar from tanuki.data_store.column_alias import", "metadata: Optional[M] = None if metadata_class is not None: metadata = self._db_adapter.get_group_table_metadata(data_token, metadata_class)", ") -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def row_count(self, data_token: DataToken) -> int:", "store_type: Type[T], data_token: DataToken, query: Optional[Query] = None, columns: Optional[list[ColumnAlias]] = None, )", "data_token: DataToken) -> dict[str, DataType]: with self._db_adapter: store_class = self._registrar.store_type(data_token) return {column.name: column.dtype", "None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def copy_group( self: Database, source_data_token: DataToken, target_data_token: DataToken,", "columns is not None else None table_data = self._db_adapter.query(data_token, query, columns) store_class: Type[T]", "col in columns] if columns is not None else None table_data = self._db_adapter.query(data_token,", "with self._db_adapter: return self._registrar.has_group(data_group) def list_groups(self) -> set[str]: with self._db_adapter: return self._registrar.list_groups() def", "DataToken, criteria: Query) -> None: with self._db_adapter: if not self._registrar.has_table(data_token): raise MissingTableError(data_token) self._db_adapter.delete(data_token,", "DataToken, target_data_token: DataToken, ) -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_table( self:", "Query from .adapter.database_adapter import DatabaseAdapter from .data_token import DataToken from .database_registrar import DatabaseRegistrar", ") -> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def move_table( self: Database, source_data_token: DataToken,", "{column.name: column.dtype for column in store_class.columns} def has_table(self, data_token: DataToken) -> bool: with", "= None if metadata_class is not None: metadata = self._db_adapter.get_group_table_metadata(data_token, metadata_class) store =", "columns = [str(col) for col in alignment_columns] self._db_adapter.upsert(data_token, data_store, columns) def delete(self: Database,", "Type[T] = self._registrar.store_type(data_token) metadata_class: Type[M] = self._registrar.metadata_class(data_token) metadata: Optional[M] = None if metadata_class", "self._db_adapter.query(data_token, query, columns) store_class: Type[T] = self._registrar.store_type(data_token) metadata_class: Type[M] = self._registrar.metadata_class(data_token) metadata: Optional[M]", "return self def __exit__( self: Database, type: Optional[Type[BaseException]] = None, value: Optional[BaseException] =", "-> None: with self._db_adapter: self._registrar.copy_table(source_data_token, target_data_token) def row_count(self, data_token: DataToken) -> int: with" ]
[]
[ "tmp_str inside, and removes the file after all. \"\"\" tmp_file_name = None try:", "END OF HEADER ''' @contextmanager def mktmp(tmp_str): \"\"\"Returns the name of the file", "/ AGENCY 4733K06635 TRIMBLE NETR5 4.85 REC # / TYPE / VERS 30517456", "(4121967.5664, 2652172.1378, 4069036.5926) std_lbh = (32.75819444508266, 39.88741666437168, 989.9998747808859) lbh = xyz2lbh(*xyz) assert std_lbh", "which holds tmp_str inside, and removes the file after all. \"\"\" tmp_file_name =", "-1567978.8630 APPROX POSITION XYZ 0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N 1 1 WAVELENGTH", "0.0000 ANTENNA: DELTA H/E/N 1 1 WAVELENGTH FACT L1/2 11 L1 L2 L5", "for manager in managers: with manager(RNX) as tmp_rinex: xyz = retrieve_xyz(tmp_rinex) assert xyz", "/ TYPES OF OBSERV 30.0000 INTERVAL 18 LEAP SECONDS 2017 7 6 0", "GPS TIME OF FIRST OBS END OF HEADER ''' @contextmanager def mktmp(tmp_str): \"\"\"Returns", "= None try: with NamedTemporaryFile(mode='w', delete=False) as tmp_file: tmp_file.writelines(tmp_str) tmp_file.close() tmp_file_name = tmp_file.name", "VERS 30517456 TRM55971.00 NONE ANT # / TYPE -6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION", "RINEX VERSION / TYPE teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN BY /", "TRM55971.00 NONE ANT # / TYPE -6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION XYZ 0.0000", "VERSION / TYPE teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN BY / DATE", "/ TYPES OF OBSERV S2 S5 # / TYPES OF OBSERV 30.0000 INTERVAL", "OF OBSERV 30.0000 INTERVAL 18 LEAP SECONDS 2017 7 6 0 0 0.0000000", "-996506.1670, -1567978.8630) managers = [ mktmp, StringIO, ] for manager in managers: with", "teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN BY / DATE ASPA MARKER NAME", "test_xyz2lbh(): xyz = (4121967.5664, 2652172.1378, 4069036.5926) std_lbh = (32.75819444508266, 39.88741666437168, 989.9998747808859) lbh =", "L2 L5 C1 P1 C2 P2 C5 S1# / TYPES OF OBSERV S2", "POSITION XYZ 0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N 1 1 WAVELENGTH FACT L1/2", "gpss.coordinates.\"\"\" from io import StringIO from os import remove from contextlib import contextmanager", "ASPA MARKER NAME 50503S006 MARKER NUMBER <NAME> NGS OBSERVER / AGENCY 4733K06635 TRIMBLE", "XYZ 0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N 1 1 WAVELENGTH FACT L1/2 11", "MARKER NUMBER <NAME> NGS OBSERVER / AGENCY 4733K06635 TRIMBLE NETR5 4.85 REC #", "from io import StringIO from os import remove from contextlib import contextmanager from", "0.0000000 GPS TIME OF FIRST OBS END OF HEADER ''' @contextmanager def mktmp(tmp_str):", "all. 
\"\"\" tmp_file_name = None try: with NamedTemporaryFile(mode='w', delete=False) as tmp_file: tmp_file.writelines(tmp_str) tmp_file.close()", "/ TYPE -6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION XYZ 0.0000 0.0000 0.0000 ANTENNA: DELTA", "TRIMBLE NETR5 4.85 REC # / TYPE / VERS 30517456 TRM55971.00 NONE ANT", "yield tmp_file_name finally: if tmp_file_name: remove(tmp_file_name) def test_retrieve_xyz(): std_xyz = (-6100258.8690, -996506.1670, -1567978.8630)", "StringIO from os import remove from contextlib import contextmanager from tempfile import NamedTemporaryFile", "xyz == std_xyz def test_xyz2lbh(): xyz = (4121967.5664, 2652172.1378, 4069036.5926) std_lbh = (32.75819444508266,", "NETR5 4.85 REC # / TYPE / VERS 30517456 TRM55971.00 NONE ANT #", "S1# / TYPES OF OBSERV S2 S5 # / TYPES OF OBSERV 30.0000", "# / TYPES OF OBSERV 30.0000 INTERVAL 18 LEAP SECONDS 2017 7 6", "tmp_file_name: remove(tmp_file_name) def test_retrieve_xyz(): std_xyz = (-6100258.8690, -996506.1670, -1567978.8630) managers = [ mktmp,", "-1567978.8630) managers = [ mktmp, StringIO, ] for manager in managers: with manager(RNX)", "mktmp(tmp_str): \"\"\"Returns the name of the file which holds tmp_str inside, and removes", "from os import remove from contextlib import contextmanager from tempfile import NamedTemporaryFile from", "from contextlib import contextmanager from tempfile import NamedTemporaryFile from coordinates import retrieve_xyz, xyz2lbh", "NAME 50503S006 MARKER NUMBER <NAME> NGS OBSERVER / AGENCY 4733K06635 TRIMBLE NETR5 4.85", "/ VERS 30517456 TRM55971.00 NONE ANT # / TYPE -6100258.8690 -996506.1670 -1567978.8630 APPROX", "30517456 TRM55971.00 NONE ANT # / TYPE -6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION XYZ", "1 1 WAVELENGTH FACT L1/2 11 L1 L2 L5 C1 P1 C2 P2", "P2 C5 S1# / TYPES OF OBSERV S2 S5 # / TYPES OF", "for gpss.coordinates.\"\"\" from io import StringIO from os import remove from contextlib import", "S2 S5 # / TYPES OF OBSERV 30.0000 INTERVAL 18 LEAP SECONDS 2017", "removes the file after all. \"\"\" tmp_file_name = None try: with NamedTemporaryFile(mode='w', delete=False)", "inside, and removes the file after all. 
\"\"\" tmp_file_name = None try: with", "4069036.5926) std_lbh = (32.75819444508266, 39.88741666437168, 989.9998747808859) lbh = xyz2lbh(*xyz) assert std_lbh == lbh", "\"\"\" tmp_file_name = None try: with NamedTemporaryFile(mode='w', delete=False) as tmp_file: tmp_file.writelines(tmp_str) tmp_file.close() tmp_file_name", "/ TYPE / VERS 30517456 TRM55971.00 NONE ANT # / TYPE -6100258.8690 -996506.1670", "/ TYPE teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN BY / DATE ASPA", "REC # / TYPE / VERS 30517456 TRM55971.00 NONE ANT # / TYPE", "tmp_file_name = None try: with NamedTemporaryFile(mode='w', delete=False) as tmp_file: tmp_file.writelines(tmp_str) tmp_file.close() tmp_file_name =", "M (MIXED) RINEX VERSION / TYPE teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN", "managers = [ mktmp, StringIO, ] for manager in managers: with manager(RNX) as", "tmp_file.name yield tmp_file_name finally: if tmp_file_name: remove(tmp_file_name) def test_retrieve_xyz(): std_xyz = (-6100258.8690, -996506.1670,", "0.0000 0.0000 ANTENNA: DELTA H/E/N 1 1 WAVELENGTH FACT L1/2 11 L1 L2", "TYPES OF OBSERV S2 S5 # / TYPES OF OBSERV 30.0000 INTERVAL 18", "tmp_file.close() tmp_file_name = tmp_file.name yield tmp_file_name finally: if tmp_file_name: remove(tmp_file_name) def test_retrieve_xyz(): std_xyz", "TYPES OF OBSERV 30.0000 INTERVAL 18 LEAP SECONDS 2017 7 6 0 0", "import StringIO from os import remove from contextlib import contextmanager from tempfile import", "''' @contextmanager def mktmp(tmp_str): \"\"\"Returns the name of the file which holds tmp_str", "std_xyz = (-6100258.8690, -996506.1670, -1567978.8630) managers = [ mktmp, StringIO, ] for manager", "retrieve_xyz, xyz2lbh RNX = '''\\ 2.11 OBSERVATION DATA M (MIXED) RINEX VERSION /", "SECONDS 2017 7 6 0 0 0.0000000 GPS TIME OF FIRST OBS END", "with manager(RNX) as tmp_rinex: xyz = retrieve_xyz(tmp_rinex) assert xyz == std_xyz def test_xyz2lbh():", "as tmp_rinex: xyz = retrieve_xyz(tmp_rinex) assert xyz == std_xyz def test_xyz2lbh(): xyz =", "P1 C2 P2 C5 S1# / TYPES OF OBSERV S2 S5 # /", "std_xyz def test_xyz2lbh(): xyz = (4121967.5664, 2652172.1378, 4069036.5926) std_lbh = (32.75819444508266, 39.88741666437168, 989.9998747808859)", "NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN BY / DATE ASPA MARKER NAME 50503S006 MARKER", "coordinates import retrieve_xyz, xyz2lbh RNX = '''\\ 2.11 OBSERVATION DATA M (MIXED) RINEX", "xyz = (4121967.5664, 2652172.1378, 4069036.5926) std_lbh = (32.75819444508266, 39.88741666437168, 989.9998747808859) lbh = xyz2lbh(*xyz)", "ANT # / TYPE -6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION XYZ 0.0000 0.0000 0.0000", "remove from contextlib import contextmanager from tempfile import NamedTemporaryFile from coordinates import retrieve_xyz,", "7 6 0 0 0.0000000 GPS TIME OF FIRST OBS END OF HEADER", "30.0000 INTERVAL 18 LEAP SECONDS 2017 7 6 0 0 0.0000000 GPS TIME", "StringIO, ] for manager in managers: with manager(RNX) as tmp_rinex: xyz = retrieve_xyz(tmp_rinex)", "DATE ASPA MARKER NAME 50503S006 MARKER NUMBER <NAME> NGS OBSERVER / AGENCY 4733K06635", "DELTA H/E/N 1 1 WAVELENGTH FACT L1/2 11 L1 L2 L5 C1 P1", "of the file which holds tmp_str inside, and removes the file after all.", "\"\"\"Test suite for gpss.coordinates.\"\"\" from io import StringIO from os import remove from", "holds tmp_str inside, and removes the file after all. 
\"\"\" tmp_file_name = None", "== std_xyz def test_xyz2lbh(): xyz = (4121967.5664, 2652172.1378, 4069036.5926) std_lbh = (32.75819444508266, 39.88741666437168,", "TYPE / VERS 30517456 TRM55971.00 NONE ANT # / TYPE -6100258.8690 -996506.1670 -1567978.8630", "FIRST OBS END OF HEADER ''' @contextmanager def mktmp(tmp_str): \"\"\"Returns the name of", "import NamedTemporaryFile from coordinates import retrieve_xyz, xyz2lbh RNX = '''\\ 2.11 OBSERVATION DATA", "suite for gpss.coordinates.\"\"\" from io import StringIO from os import remove from contextlib", "OF FIRST OBS END OF HEADER ''' @contextmanager def mktmp(tmp_str): \"\"\"Returns the name", "4733K06635 TRIMBLE NETR5 4.85 REC # / TYPE / VERS 30517456 TRM55971.00 NONE", "APPROX POSITION XYZ 0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N 1 1 WAVELENGTH FACT", "coding=utf8 \"\"\"Test suite for gpss.coordinates.\"\"\" from io import StringIO from os import remove", "from coordinates import retrieve_xyz, xyz2lbh RNX = '''\\ 2.11 OBSERVATION DATA M (MIXED)", "RNX = '''\\ 2.11 OBSERVATION DATA M (MIXED) RINEX VERSION / TYPE teqc", "AGENCY 4733K06635 TRIMBLE NETR5 4.85 REC # / TYPE / VERS 30517456 TRM55971.00", "RUN BY / DATE ASPA MARKER NAME 50503S006 MARKER NUMBER <NAME> NGS OBSERVER", "@contextmanager def mktmp(tmp_str): \"\"\"Returns the name of the file which holds tmp_str inside,", "\"\"\"Returns the name of the file which holds tmp_str inside, and removes the", "tmp_file_name = tmp_file.name yield tmp_file_name finally: if tmp_file_name: remove(tmp_file_name) def test_retrieve_xyz(): std_xyz =", "remove(tmp_file_name) def test_retrieve_xyz(): std_xyz = (-6100258.8690, -996506.1670, -1567978.8630) managers = [ mktmp, StringIO,", "2652172.1378, 4069036.5926) std_lbh = (32.75819444508266, 39.88741666437168, 989.9998747808859) lbh = xyz2lbh(*xyz) assert std_lbh ==", "finally: if tmp_file_name: remove(tmp_file_name) def test_retrieve_xyz(): std_xyz = (-6100258.8690, -996506.1670, -1567978.8630) managers =", "BY / DATE ASPA MARKER NAME 50503S006 MARKER NUMBER <NAME> NGS OBSERVER /", "ANTENNA: DELTA H/E/N 1 1 WAVELENGTH FACT L1/2 11 L1 L2 L5 C1", "xyz = retrieve_xyz(tmp_rinex) assert xyz == std_xyz def test_xyz2lbh(): xyz = (4121967.5664, 2652172.1378,", "contextmanager from tempfile import NamedTemporaryFile from coordinates import retrieve_xyz, xyz2lbh RNX = '''\\", "DATA M (MIXED) RINEX VERSION / TYPE teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM /", "50503S006 MARKER NUMBER <NAME> NGS OBSERVER / AGENCY 4733K06635 TRIMBLE NETR5 4.85 REC", "NUMBER <NAME> NGS OBSERVER / AGENCY 4733K06635 TRIMBLE NETR5 4.85 REC # /", "in managers: with manager(RNX) as tmp_rinex: xyz = retrieve_xyz(tmp_rinex) assert xyz == std_xyz", "delete=False) as tmp_file: tmp_file.writelines(tmp_str) tmp_file.close() tmp_file_name = tmp_file.name yield tmp_file_name finally: if tmp_file_name:", "(-6100258.8690, -996506.1670, -1567978.8630) managers = [ mktmp, StringIO, ] for manager in managers:", "20170707 04:06:33UTCPGM / RUN BY / DATE ASPA MARKER NAME 50503S006 MARKER NUMBER", "OBSERVER / AGENCY 4733K06635 TRIMBLE NETR5 4.85 REC # / TYPE / VERS", "as tmp_file: tmp_file.writelines(tmp_str) tmp_file.close() tmp_file_name = tmp_file.name yield tmp_file_name finally: if tmp_file_name: remove(tmp_file_name)", "= [ mktmp, StringIO, ] for manager in managers: with manager(RNX) as tmp_rinex:", "import retrieve_xyz, xyz2lbh RNX = '''\\ 2.11 OBSERVATION DATA M (MIXED) RINEX VERSION", "/ RUN BY / DATE ASPA MARKER NAME 50503S006 MARKER NUMBER <NAME> NGS", "C1 P1 C2 P2 C5 S1# / 
TYPES OF OBSERV S2 S5 #", "if tmp_file_name: remove(tmp_file_name) def test_retrieve_xyz(): std_xyz = (-6100258.8690, -996506.1670, -1567978.8630) managers = [", "retrieve_xyz(tmp_rinex) assert xyz == std_xyz def test_xyz2lbh(): xyz = (4121967.5664, 2652172.1378, 4069036.5926) std_lbh", "from tempfile import NamedTemporaryFile from coordinates import retrieve_xyz, xyz2lbh RNX = '''\\ 2.11", "NamedTemporaryFile from coordinates import retrieve_xyz, xyz2lbh RNX = '''\\ 2.11 OBSERVATION DATA M", "<NAME> NGS OBSERVER / AGENCY 4733K06635 TRIMBLE NETR5 4.85 REC # / TYPE", "= (-6100258.8690, -996506.1670, -1567978.8630) managers = [ mktmp, StringIO, ] for manager in", "FACT L1/2 11 L1 L2 L5 C1 P1 C2 P2 C5 S1# /", "the file which holds tmp_str inside, and removes the file after all. \"\"\"", "OBSERVATION DATA M (MIXED) RINEX VERSION / TYPE teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM", "TIME OF FIRST OBS END OF HEADER ''' @contextmanager def mktmp(tmp_str): \"\"\"Returns the", "the file after all. \"\"\" tmp_file_name = None try: with NamedTemporaryFile(mode='w', delete=False) as", "6 0 0 0.0000000 GPS TIME OF FIRST OBS END OF HEADER '''", "tmp_file.writelines(tmp_str) tmp_file.close() tmp_file_name = tmp_file.name yield tmp_file_name finally: if tmp_file_name: remove(tmp_file_name) def test_retrieve_xyz():", "io import StringIO from os import remove from contextlib import contextmanager from tempfile", "file which holds tmp_str inside, and removes the file after all. \"\"\" tmp_file_name", "C5 S1# / TYPES OF OBSERV S2 S5 # / TYPES OF OBSERV", "18 LEAP SECONDS 2017 7 6 0 0 0.0000000 GPS TIME OF FIRST", "and removes the file after all. \"\"\" tmp_file_name = None try: with NamedTemporaryFile(mode='w',", "MARKER NAME 50503S006 MARKER NUMBER <NAME> NGS OBSERVER / AGENCY 4733K06635 TRIMBLE NETR5", "def test_retrieve_xyz(): std_xyz = (-6100258.8690, -996506.1670, -1567978.8630) managers = [ mktmp, StringIO, ]", "= tmp_file.name yield tmp_file_name finally: if tmp_file_name: remove(tmp_file_name) def test_retrieve_xyz(): std_xyz = (-6100258.8690,", "1 WAVELENGTH FACT L1/2 11 L1 L2 L5 C1 P1 C2 P2 C5", "0 0 0.0000000 GPS TIME OF FIRST OBS END OF HEADER ''' @contextmanager", "(MIXED) RINEX VERSION / TYPE teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN BY", "= (4121967.5664, 2652172.1378, 4069036.5926) std_lbh = (32.75819444508266, 39.88741666437168, 989.9998747808859) lbh = xyz2lbh(*xyz) assert", "S5 # / TYPES OF OBSERV 30.0000 INTERVAL 18 LEAP SECONDS 2017 7", "= retrieve_xyz(tmp_rinex) assert xyz == std_xyz def test_xyz2lbh(): xyz = (4121967.5664, 2652172.1378, 4069036.5926)", "file after all. 
\"\"\" tmp_file_name = None try: with NamedTemporaryFile(mode='w', delete=False) as tmp_file:", "import contextmanager from tempfile import NamedTemporaryFile from coordinates import retrieve_xyz, xyz2lbh RNX =", "'''\\ 2.11 OBSERVATION DATA M (MIXED) RINEX VERSION / TYPE teqc 2016Nov7 NOAA/NOS/NGS/CORS", "tmp_file: tmp_file.writelines(tmp_str) tmp_file.close() tmp_file_name = tmp_file.name yield tmp_file_name finally: if tmp_file_name: remove(tmp_file_name) def", "try: with NamedTemporaryFile(mode='w', delete=False) as tmp_file: tmp_file.writelines(tmp_str) tmp_file.close() tmp_file_name = tmp_file.name yield tmp_file_name", "OF OBSERV S2 S5 # / TYPES OF OBSERV 30.0000 INTERVAL 18 LEAP", "C2 P2 C5 S1# / TYPES OF OBSERV S2 S5 # / TYPES", "0 0.0000000 GPS TIME OF FIRST OBS END OF HEADER ''' @contextmanager def", "WAVELENGTH FACT L1/2 11 L1 L2 L5 C1 P1 C2 P2 C5 S1#", "tmp_file_name finally: if tmp_file_name: remove(tmp_file_name) def test_retrieve_xyz(): std_xyz = (-6100258.8690, -996506.1670, -1567978.8630) managers", "tempfile import NamedTemporaryFile from coordinates import retrieve_xyz, xyz2lbh RNX = '''\\ 2.11 OBSERVATION", "11 L1 L2 L5 C1 P1 C2 P2 C5 S1# / TYPES OF", "L1 L2 L5 C1 P1 C2 P2 C5 S1# / TYPES OF OBSERV", "H/E/N 1 1 WAVELENGTH FACT L1/2 11 L1 L2 L5 C1 P1 C2", "LEAP SECONDS 2017 7 6 0 0 0.0000000 GPS TIME OF FIRST OBS", "/ DATE ASPA MARKER NAME 50503S006 MARKER NUMBER <NAME> NGS OBSERVER / AGENCY", "the name of the file which holds tmp_str inside, and removes the file", "managers: with manager(RNX) as tmp_rinex: xyz = retrieve_xyz(tmp_rinex) assert xyz == std_xyz def", "OF HEADER ''' @contextmanager def mktmp(tmp_str): \"\"\"Returns the name of the file which", "test_retrieve_xyz(): std_xyz = (-6100258.8690, -996506.1670, -1567978.8630) managers = [ mktmp, StringIO, ] for", "manager(RNX) as tmp_rinex: xyz = retrieve_xyz(tmp_rinex) assert xyz == std_xyz def test_xyz2lbh(): xyz", "TYPE -6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION XYZ 0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N", "name of the file which holds tmp_str inside, and removes the file after", "with NamedTemporaryFile(mode='w', delete=False) as tmp_file: tmp_file.writelines(tmp_str) tmp_file.close() tmp_file_name = tmp_file.name yield tmp_file_name finally:", "import remove from contextlib import contextmanager from tempfile import NamedTemporaryFile from coordinates import", "2017 7 6 0 0 0.0000000 GPS TIME OF FIRST OBS END OF", "2.11 OBSERVATION DATA M (MIXED) RINEX VERSION / TYPE teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707", "NONE ANT # / TYPE -6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION XYZ 0.0000 0.0000", "manager in managers: with manager(RNX) as tmp_rinex: xyz = retrieve_xyz(tmp_rinex) assert xyz ==", "-996506.1670 -1567978.8630 APPROX POSITION XYZ 0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N 1 1", "# / TYPE / VERS 30517456 TRM55971.00 NONE ANT # / TYPE -6100258.8690", "4.85 REC # / TYPE / VERS 30517456 TRM55971.00 NONE ANT # /", "L1/2 11 L1 L2 L5 C1 P1 C2 P2 C5 S1# / TYPES", "OBSERV S2 S5 # / TYPES OF OBSERV 30.0000 INTERVAL 18 LEAP SECONDS", "L5 C1 P1 C2 P2 C5 S1# / TYPES OF OBSERV S2 S5", "OBS END OF HEADER ''' @contextmanager def mktmp(tmp_str): \"\"\"Returns the name of the", "HEADER ''' @contextmanager def mktmp(tmp_str): \"\"\"Returns the name of the file which holds", "NGS OBSERVER / AGENCY 4733K06635 TRIMBLE NETR5 4.85 REC # / TYPE /", "INTERVAL 18 LEAP SECONDS 2017 7 6 0 0 0.0000000 GPS TIME OF", "def mktmp(tmp_str): \"\"\"Returns the name of the file which holds tmp_str inside, and", 
"NamedTemporaryFile(mode='w', delete=False) as tmp_file: tmp_file.writelines(tmp_str) tmp_file.close() tmp_file_name = tmp_file.name yield tmp_file_name finally: if", "[ mktmp, StringIO, ] for manager in managers: with manager(RNX) as tmp_rinex: xyz", "<reponame>ilya-e/coordinates<gh_stars>0 # coding=utf8 \"\"\"Test suite for gpss.coordinates.\"\"\" from io import StringIO from os", "2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN BY / DATE ASPA MARKER NAME 50503S006", "contextlib import contextmanager from tempfile import NamedTemporaryFile from coordinates import retrieve_xyz, xyz2lbh RNX", "mktmp, StringIO, ] for manager in managers: with manager(RNX) as tmp_rinex: xyz =", "None try: with NamedTemporaryFile(mode='w', delete=False) as tmp_file: tmp_file.writelines(tmp_str) tmp_file.close() tmp_file_name = tmp_file.name yield", "TYPE teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN BY / DATE ASPA MARKER", "tmp_rinex: xyz = retrieve_xyz(tmp_rinex) assert xyz == std_xyz def test_xyz2lbh(): xyz = (4121967.5664,", "os import remove from contextlib import contextmanager from tempfile import NamedTemporaryFile from coordinates", "# / TYPE -6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION XYZ 0.0000 0.0000 0.0000 ANTENNA:", "OBSERV 30.0000 INTERVAL 18 LEAP SECONDS 2017 7 6 0 0 0.0000000 GPS", "= '''\\ 2.11 OBSERVATION DATA M (MIXED) RINEX VERSION / TYPE teqc 2016Nov7", "# coding=utf8 \"\"\"Test suite for gpss.coordinates.\"\"\" from io import StringIO from os import", "0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N 1 1 WAVELENGTH FACT L1/2 11 L1", "def test_xyz2lbh(): xyz = (4121967.5664, 2652172.1378, 4069036.5926) std_lbh = (32.75819444508266, 39.88741666437168, 989.9998747808859) lbh", "assert xyz == std_xyz def test_xyz2lbh(): xyz = (4121967.5664, 2652172.1378, 4069036.5926) std_lbh =", "04:06:33UTCPGM / RUN BY / DATE ASPA MARKER NAME 50503S006 MARKER NUMBER <NAME>", "] for manager in managers: with manager(RNX) as tmp_rinex: xyz = retrieve_xyz(tmp_rinex) assert", "-6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION XYZ 0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N 1", "xyz2lbh RNX = '''\\ 2.11 OBSERVATION DATA M (MIXED) RINEX VERSION / TYPE", "after all. \"\"\" tmp_file_name = None try: with NamedTemporaryFile(mode='w', delete=False) as tmp_file: tmp_file.writelines(tmp_str)" ]
[ "detected boxes to ROS type and publish ros_boxes = self.bridge.to_ros_boxes(bboxes) if self.fall_publisher is", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "None: message = self.bridge.to_ros_image(Image(image), encoding='bgr8') self.image_publisher.publish(message) if __name__ == '__main__': # Select the", "Project # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "the try: if torch.cuda.is_available(): print(\"GPU found.\") device = 'cuda' else: print(\"GPU not found.", "# Convert sensor_msgs.msg.Image into OpenDR Image image = self.bridge.from_ros_image(data, encoding='bgr8') # Run fall", "\"\"\" if output_image_topic is not None: self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10) else: self.image_publisher", "language governing permissions and # limitations under the License. import rospy import torch", "2) cv2.putText(image, \"Detected fallen person\", (5, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 1, cv2.LINE_AA) #", "top=y, width=w, height=h, name=0) bboxes.data.append(bbox) cv2.rectangle(image, (x, y), (x + w, y +", "55), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 1, cv2.LINE_AA) # Convert detected boxes to ROS type", "pose estimation self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=2, mobilenet_use_stride=False, half_precision=False) self.pose_estimator.download(path=\".\", verbose=True) self.pose_estimator.load(\"openpose_default\") self.fall_detector =", "\"\"\" Callback that process the input data and publishes to the corresponding topics", "y), (x + w, y + h), color, 2) cv2.putText(image, \"Detected fallen person\",", "Creates a ROS Node for fall detection :param input_image_topic: Topic from which we", "= BoundingBoxList([]) for detection in detections: fallen = detection[0].data pose = detection[2] if", "this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import rospy
import torch
import cv2
from vision_msgs.msg import Detection2DArray
from sensor_msgs.msg import Image as ROS_Image
from opendr_bridge import ROSBridge
from opendr.perception.pose_estimation import get_bbox
from opendr.perception.pose_estimation import LightweightOpenPoseLearner
from opendr.perception.fall_detection import FallDetectorLearner
from opendr.engine.data import Image
from opendr.engine.target import BoundingBox, BoundingBoxList


class FallDetectionNode:

    def __init__(self, input_image_topic="/usb_cam/image_raw", output_image_topic="/opendr/image_fall_annotated",
                 fall_annotations_topic="/opendr/falls", device="cuda"):
        """
        Creates a ROS Node for fall detection
        :param input_image_topic: Topic from which we are reading the input image
        :type input_image_topic: str
        :param output_image_topic: Topic to which we are publishing the annotated image (if None, the annotated
        image is not published)
        :type output_image_topic: str
        :param fall_annotations_topic: Topic to which we are publishing the annotations (if None, fall annotations
        are not published)
        :type fall_annotations_topic: str
        :param device: device on which we are running inference ('cpu' or 'cuda')
        :type device: str
        """
        if output_image_topic is not None:
            self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)
        else:
            self.image_publisher = None

        if fall_annotations_topic is not None:
            self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10)
        else:
            self.fall_publisher = None

        self.input_image_topic = input_image_topic
        self.bridge = ROSBridge()

        # Initialize the pose estimator and the fall detector built on top of it
        self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=2,
                                                         mobilenet_use_stride=False, half_precision=False)
        self.pose_estimator.download(path=".", verbose=True)
        self.pose_estimator.load("openpose_default")

        self.fall_detector = FallDetectorLearner(self.pose_estimator)

    def listen(self):
        """
        Start the node and begin processing input data
        """
        rospy.init_node('opendr_fall_detection', anonymous=True)
        rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback)
        rospy.loginfo("Fall detection node started!")
        rospy.spin()

    def callback(self, data):
        """
        Callback that processes the input data and publishes to the corresponding topics
        :param data: input message
        :type data: sensor_msgs.msg.Image
        """
        # Convert sensor_msgs.msg.Image into an OpenDR Image
        image = self.bridge.from_ros_image(data, encoding='bgr8')

        # Run fall detection
        detections = self.fall_detector.infer(image)

        # Get an OpenCV image back for annotation
        image = image.opencv()

        bboxes = BoundingBoxList([])
        for detection in detections:
            fallen = detection[0].data
            pose = detection[2]

            if fallen == 1:
                color = (0, 0, 255)
                x, y, w, h = get_bbox(pose)
                bbox = BoundingBox(left=x, top=y, width=w, height=h, name=0)
                bboxes.data.append(bbox)

                cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
                cv2.putText(image, "Detected fallen person", (5, 55), cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, color, 1, cv2.LINE_AA)

        # Convert detected boxes to ROS type and publish
        ros_boxes = self.bridge.to_ros_boxes(bboxes)
        if self.fall_publisher is not None:
            self.fall_publisher.publish(ros_boxes)

        if self.image_publisher is not None:
            message = self.bridge.to_ros_image(Image(image), encoding='bgr8')
            self.image_publisher.publish(message)


if __name__ == '__main__':
    # Select the device for running inference
    try:
        if torch.cuda.is_available():
            print("GPU found.")
            device = 'cuda'
        else:
            print("GPU not found. Using CPU instead.")
            device = 'cpu'
    except Exception:
        device = 'cpu'

    fall_detection_node = FallDetectionNode(device=device)
    fall_detection_node.listen()
Using CPU instead.\") device = 'cpu' except: device", "if self.image_publisher is not None: message = self.bridge.to_ros_image(Image(image), encoding='bgr8') self.image_publisher.publish(message) if __name__ ==", "Get an OpenCV image back image = image.opencv() bboxes = BoundingBoxList([]) for detection", "corresponding topics :param data: input message :type data: sensor_msgs.msg.Image \"\"\" # Convert sensor_msgs.msg.Image", "1: color = (0, 0, 255) x, y, w, h = get_bbox(pose) bbox", "image) :type output_image_topic: str :param fall_annotations_topic: Topic to which we are publishing the", "file except in compliance with the License. # You may obtain a copy", "the corresponding topics :param data: input message :type data: sensor_msgs.msg.Image \"\"\" # Convert", "+ w, y + h), color, 2) cv2.putText(image, \"Detected fallen person\", (5, 55),", "begin processing input data \"\"\" rospy.init_node('opendr_fall_detection', anonymous=True) rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback) rospy.loginfo(\"Fall detection node", "torch import cv2 from vision_msgs.msg import Detection2DArray from sensor_msgs.msg import Image as ROS_Image", ":param device: device on which we are running inference ('cpu' or 'cuda') :type", "we are not publishing annotated fall annotations) :type fall_annotations_topic: str :param device: device", "governing permissions and # limitations under the License. import rospy import torch import", "BoundingBoxList class FallDetectionNode: def __init__(self, input_image_topic=\"/usb_cam/image_raw\", output_image_topic=\"/opendr/image_fall_annotated\", fall_annotations_topic=\"/opendr/falls\", device=\"cuda\"): \"\"\" Creates a ROS", "BoundingBox(left=x, top=y, width=w, height=h, name=0) bboxes.data.append(bbox) cv2.rectangle(image, (x, y), (x + w, y", "1, cv2.LINE_AA) # Convert detected boxes to ROS type and publish ros_boxes =", "License for the specific language governing permissions and # limitations under the License.", "we are not publishing annotated image) :type output_image_topic: str :param fall_annotations_topic: Topic to", "detection node started!\") rospy.spin() def callback(self, data): \"\"\" Callback that process the input", "to in writing, software # distributed under the License is distributed on an", "Topic from which we are reading the input image :type input_image_topic: str :param", "input_image_topic: Topic from which we are reading the input image :type input_image_topic: str", "into OpenDR Image image = self.bridge.from_ros_image(data, encoding='bgr8') # Run fall detection detections =", "annotations) :type fall_annotations_topic: str :param device: device on which we are running inference", "running inference ('cpu' or 'cuda') :type device: str \"\"\" if output_image_topic is not", "implied. # See the License for the specific language governing permissions and #", "if __name__ == '__main__': # Select the device for running the try: if", "input data \"\"\" rospy.init_node('opendr_fall_detection', anonymous=True) rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback) rospy.loginfo(\"Fall detection node started!\") rospy.spin()", "\"License\"); # you may not use this file except in compliance with the", "Convert sensor_msgs.msg.Image into OpenDR Image image = self.bridge.from_ros_image(data, encoding='bgr8') # Run fall detection", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "== 1: color = (0, 0, 255) x, y, w, h = get_bbox(pose)", "License. 
import rospy import torch import cv2 from vision_msgs.msg import Detection2DArray from sensor_msgs.msg", "self.bridge = ROSBridge() # Initialize the pose estimation self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=2, mobilenet_use_stride=False,", "Start the node and begin processing input data \"\"\" rospy.init_node('opendr_fall_detection', anonymous=True) rospy.Subscriber(self.input_image_topic, ROS_Image,", "rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10) else: self.fall_publisher = None self.input_image_topic = input_image_topic self.bridge = ROSBridge()", "= input_image_topic self.bridge = ROSBridge() # Initialize the pose estimation self.pose_estimator = LightweightOpenPoseLearner(device=device,", "or implied. # See the License for the specific language governing permissions and", "(0, 0, 255) x, y, w, h = get_bbox(pose) bbox = BoundingBox(left=x, top=y,", "opendr.engine.data import Image from opendr.engine.target import BoundingBox, BoundingBoxList class FallDetectionNode: def __init__(self, input_image_topic=\"/usb_cam/image_raw\",", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "+ h), color, 2) cv2.putText(image, \"Detected fallen person\", (5, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color,", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "height=h, name=0) bboxes.data.append(bbox) cv2.rectangle(image, (x, y), (x + w, y + h), color,", ":type data: sensor_msgs.msg.Image \"\"\" # Convert sensor_msgs.msg.Image into OpenDR Image image = self.bridge.from_ros_image(data,", "from vision_msgs.msg import Detection2DArray from sensor_msgs.msg import Image as ROS_Image from opendr_bridge import", "the node and begin processing input data \"\"\" rospy.init_node('opendr_fall_detection', anonymous=True) rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback)", "as ROS_Image from opendr_bridge import ROSBridge from opendr.perception.pose_estimation import get_bbox from opendr.perception.pose_estimation import", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "we are publishing the annotations (if None, we are not publishing annotated fall", "fall detection detections = self.fall_detector.infer(image) # Get an OpenCV image back image =", "__init__(self, input_image_topic=\"/usb_cam/image_raw\", output_image_topic=\"/opendr/image_fall_annotated\", fall_annotations_topic=\"/opendr/falls\", device=\"cuda\"): \"\"\" Creates a ROS Node for fall detection", "= rospy.Publisher(output_image_topic, ROS_Image, queue_size=10) else: self.image_publisher = None if fall_annotations_topic is not None:", "0, 255) x, y, w, h = get_bbox(pose) bbox = BoundingBox(left=x, top=y, width=w,", "if fall_annotations_topic is not None: self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10) else: self.fall_publisher =", "to which we are publishing the annotations (if None, we are not publishing", "# limitations under the License. 
import rospy import torch import cv2 from vision_msgs.msg", "Copyright 2020-2022 OpenDR European Project # # Licensed under the Apache License, Version", "input message :type data: sensor_msgs.msg.Image \"\"\" # Convert sensor_msgs.msg.Image into OpenDR Image image", "self.fall_publisher is not None: self.fall_publisher.publish(ros_boxes) if self.image_publisher is not None: message = self.bridge.to_ros_image(Image(image),", "running the try: if torch.cuda.is_available(): print(\"GPU found.\") device = 'cuda' else: print(\"GPU not", "not None: self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10) else: self.fall_publisher = None self.input_image_topic =", "or 'cuda') :type device: str \"\"\" if output_image_topic is not None: self.image_publisher =", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "the input image :type input_image_topic: str :param output_image_topic: Topic to which we are", "OpenDR Image image = self.bridge.from_ros_image(data, encoding='bgr8') # Run fall detection detections = self.fall_detector.infer(image)", "found.\") device = 'cuda' else: print(\"GPU not found. Using CPU instead.\") device =", "from sensor_msgs.msg import Image as ROS_Image from opendr_bridge import ROSBridge from opendr.perception.pose_estimation import", "self.pose_estimator.download(path=\".\", verbose=True) self.pose_estimator.load(\"openpose_default\") self.fall_detector = FallDetectorLearner(self.pose_estimator) def listen(self): \"\"\" Start the node and", "ROSBridge() # Initialize the pose estimation self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=2, mobilenet_use_stride=False, half_precision=False) self.pose_estimator.download(path=\".\",", "not publishing annotated fall annotations) :type fall_annotations_topic: str :param device: device on which", ":param input_image_topic: Topic from which we are reading the input image :type input_image_topic:", "import get_bbox from opendr.perception.pose_estimation import LightweightOpenPoseLearner from opendr.perception.fall_detection import FallDetectorLearner from opendr.engine.data import", "use this file except in compliance with the License. 
# You may obtain", "output_image_topic is not None: self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10) else: self.image_publisher = None", "process the input data and publishes to the corresponding topics :param data: input", "get_bbox(pose) bbox = BoundingBox(left=x, top=y, width=w, height=h, name=0) bboxes.data.append(bbox) cv2.rectangle(image, (x, y), (x", "detections: fallen = detection[0].data pose = detection[2] if fallen == 1: color =", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "if fallen == 1: color = (0, 0, 255) x, y, w, h", "__name__ == '__main__': # Select the device for running the try: if torch.cuda.is_available():", "cv2.LINE_AA) # Convert detected boxes to ROS type and publish ros_boxes = self.bridge.to_ros_boxes(bboxes)", "def listen(self): \"\"\" Start the node and begin processing input data \"\"\" rospy.init_node('opendr_fall_detection',", "are not publishing annotated fall annotations) :type fall_annotations_topic: str :param device: device on", "None, we are not publishing annotated image) :type output_image_topic: str :param fall_annotations_topic: Topic", "2.0 (the \"License\"); # you may not use this file except in compliance", "rospy import torch import cv2 from vision_msgs.msg import Detection2DArray from sensor_msgs.msg import Image", "Detection2DArray, queue_size=10) else: self.fall_publisher = None self.input_image_topic = input_image_topic self.bridge = ROSBridge() #", "ROS_Image, queue_size=10) else: self.image_publisher = None if fall_annotations_topic is not None: self.fall_publisher =", "= self.fall_detector.infer(image) # Get an OpenCV image back image = image.opencv() bboxes =", "not None: message = self.bridge.to_ros_image(Image(image), encoding='bgr8') self.image_publisher.publish(message) if __name__ == '__main__': # Select", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "try: if torch.cuda.is_available(): print(\"GPU found.\") device = 'cuda' else: print(\"GPU not found. Using", "and begin processing input data \"\"\" rospy.init_node('opendr_fall_detection', anonymous=True) rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback) rospy.loginfo(\"Fall detection", "the annotations (if None, we are not publishing annotated fall annotations) :type fall_annotations_topic:", ":param output_image_topic: Topic to which we are publishing the annotated image (if None,", "not None: self.fall_publisher.publish(ros_boxes) if self.image_publisher is not None: message = self.bridge.to_ros_image(Image(image), encoding='bgr8') self.image_publisher.publish(message)", "# # Unless required by applicable law or agreed to in writing, software", "FallDetectorLearner from opendr.engine.data import Image from opendr.engine.target import BoundingBox, BoundingBoxList class FallDetectionNode: def", "express or implied. 
# See the License for the specific language governing permissions", "fall annotations) :type fall_annotations_topic: str :param device: device on which we are running", "\"\"\" # Convert sensor_msgs.msg.Image into OpenDR Image image = self.bridge.from_ros_image(data, encoding='bgr8') # Run", "None self.input_image_topic = input_image_topic self.bridge = ROSBridge() # Initialize the pose estimation self.pose_estimator", "def __init__(self, input_image_topic=\"/usb_cam/image_raw\", output_image_topic=\"/opendr/image_fall_annotated\", fall_annotations_topic=\"/opendr/falls\", device=\"cuda\"): \"\"\" Creates a ROS Node for fall", "image back image = image.opencv() bboxes = BoundingBoxList([]) for detection in detections: fallen", "= self.bridge.to_ros_image(Image(image), encoding='bgr8') self.image_publisher.publish(message) if __name__ == '__main__': # Select the device for", "that process the input data and publishes to the corresponding topics :param data:", "either express or implied. # See the License for the specific language governing", "(5, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 1, cv2.LINE_AA) # Convert detected boxes to ROS", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "device for running the try: if torch.cuda.is_available(): print(\"GPU found.\") device = 'cuda' else:", "for detection in detections: fallen = detection[0].data pose = detection[2] if fallen ==", "import Image as ROS_Image from opendr_bridge import ROSBridge from opendr.perception.pose_estimation import get_bbox from", "rospy.spin() def callback(self, data): \"\"\" Callback that process the input data and publishes", "listen(self): \"\"\" Start the node and begin processing input data \"\"\" rospy.init_node('opendr_fall_detection', anonymous=True)", "type and publish ros_boxes = self.bridge.to_ros_boxes(bboxes) if self.fall_publisher is not None: self.fall_publisher.publish(ros_boxes) if", "for running the try: if torch.cuda.is_available(): print(\"GPU found.\") device = 'cuda' else: print(\"GPU", "boxes to ROS type and publish ros_boxes = self.bridge.to_ros_boxes(bboxes) if self.fall_publisher is not", "the License. 
# You may obtain a copy of the License at #", "y + h), color, 2) cv2.putText(image, \"Detected fallen person\", (5, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.75,", "self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10) else: self.image_publisher = None if fall_annotations_topic is not", "the input data and publishes to the corresponding topics :param data: input message", "input data and publishes to the corresponding topics :param data: input message :type", "BoundingBox, BoundingBoxList class FallDetectionNode: def __init__(self, input_image_topic=\"/usb_cam/image_raw\", output_image_topic=\"/opendr/image_fall_annotated\", fall_annotations_topic=\"/opendr/falls\", device=\"cuda\"): \"\"\" Creates a", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "detection :param input_image_topic: Topic from which we are reading the input image :type", "Convert detected boxes to ROS type and publish ros_boxes = self.bridge.to_ros_boxes(bboxes) if self.fall_publisher", "encoding='bgr8') self.image_publisher.publish(message) if __name__ == '__main__': # Select the device for running the", "is not None: self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10) else: self.fall_publisher = None self.input_image_topic", "image :type input_image_topic: str :param output_image_topic: Topic to which we are publishing the", "started!\") rospy.spin() def callback(self, data): \"\"\" Callback that process the input data and", "x, y, w, h = get_bbox(pose) bbox = BoundingBox(left=x, top=y, width=w, height=h, name=0)", "= detection[0].data pose = detection[2] if fallen == 1: color = (0, 0,", "the pose estimation self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=2, mobilenet_use_stride=False, half_precision=False) self.pose_estimator.download(path=\".\", verbose=True) self.pose_estimator.load(\"openpose_default\") self.fall_detector", "input_image_topic self.bridge = ROSBridge() # Initialize the pose estimation self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=2,", "BoundingBoxList([]) for detection in detections: fallen = detection[0].data pose = detection[2] if fallen", "message :type data: sensor_msgs.msg.Image \"\"\" # Convert sensor_msgs.msg.Image into OpenDR Image image =", "sensor_msgs.msg.Image into OpenDR Image image = self.bridge.from_ros_image(data, encoding='bgr8') # Run fall detection detections", "import cv2 from vision_msgs.msg import Detection2DArray from sensor_msgs.msg import Image as ROS_Image from", "queue_size=10) else: self.image_publisher = None if fall_annotations_topic is not None: self.fall_publisher = rospy.Publisher(fall_annotations_topic,", "= detection[2] if fallen == 1: color = (0, 0, 255) x, y,", "(x + w, y + h), color, 2) cv2.putText(image, \"Detected fallen person\", (5,", "Select the device for running the try: if torch.cuda.is_available(): print(\"GPU found.\") device =", "= None self.input_image_topic = input_image_topic self.bridge = ROSBridge() # Initialize the pose estimation", "self.fall_detector = FallDetectorLearner(self.pose_estimator) def listen(self): \"\"\" Start the node and begin processing input", "import ROSBridge from opendr.perception.pose_estimation import get_bbox from opendr.perception.pose_estimation import LightweightOpenPoseLearner from opendr.perception.fall_detection import", "\"\"\" Creates a ROS Node for fall detection 
:param input_image_topic: Topic from which", "print(\"GPU not found. Using CPU instead.\") device = 'cpu' except: device = 'cpu'", "import rospy import torch import cv2 from vision_msgs.msg import Detection2DArray from sensor_msgs.msg import", "with the License. # You may obtain a copy of the License at", "= None if fall_annotations_topic is not None: self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10) else:", "self.callback) rospy.loginfo(\"Fall detection node started!\") rospy.spin() def callback(self, data): \"\"\" Callback that process", "self.bridge.to_ros_image(Image(image), encoding='bgr8') self.image_publisher.publish(message) if __name__ == '__main__': # Select the device for running", "def callback(self, data): \"\"\" Callback that process the input data and publishes to", "publish ros_boxes = self.bridge.to_ros_boxes(bboxes) if self.fall_publisher is not None: self.fall_publisher.publish(ros_boxes) if self.image_publisher is", "input_image_topic: str :param output_image_topic: Topic to which we are publishing the annotated image", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "y, w, h = get_bbox(pose) bbox = BoundingBox(left=x, top=y, width=w, height=h, name=0) bboxes.data.append(bbox)", "import torch import cv2 from vision_msgs.msg import Detection2DArray from sensor_msgs.msg import Image as", "from opendr.perception.pose_estimation import LightweightOpenPoseLearner from opendr.perception.fall_detection import FallDetectorLearner from opendr.engine.data import Image from", "cv2.rectangle(image, (x, y), (x + w, y + h), color, 2) cv2.putText(image, \"Detected", "self.input_image_topic = input_image_topic self.bridge = ROSBridge() # Initialize the pose estimation self.pose_estimator =", "data: sensor_msgs.msg.Image \"\"\" # Convert sensor_msgs.msg.Image into OpenDR Image image = self.bridge.from_ros_image(data, encoding='bgr8')", "= self.bridge.to_ros_boxes(bboxes) if self.fall_publisher is not None: self.fall_publisher.publish(ros_boxes) if self.image_publisher is not None:", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "vision_msgs.msg import Detection2DArray from sensor_msgs.msg import Image as ROS_Image from opendr_bridge import ROSBridge", "anonymous=True) rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback) rospy.loginfo(\"Fall detection node started!\") rospy.spin() def callback(self, data): \"\"\"", "found. Using CPU instead.\") device = 'cpu' except: device = 'cpu' fall_detection_node =", "CPU instead.\") device = 'cpu' except: device = 'cpu' fall_detection_node = FallDetectionNode(device=device) fall_detection_node.listen()", "'__main__': # Select the device for running the try: if torch.cuda.is_available(): print(\"GPU found.\")", "which we are publishing the annotated image (if None, we are not publishing", "self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10) else: self.fall_publisher = None self.input_image_topic = input_image_topic self.bridge", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "reading the input image :type input_image_topic: str :param output_image_topic: Topic to which we", "= 'cuda' else: print(\"GPU not found. 
Using CPU instead.\") device = 'cpu' except:", "message = self.bridge.to_ros_image(Image(image), encoding='bgr8') self.image_publisher.publish(message) if __name__ == '__main__': # Select the device", "(if None, we are not publishing annotated image) :type output_image_topic: str :param fall_annotations_topic:", "ROS type and publish ros_boxes = self.bridge.to_ros_boxes(bboxes) if self.fall_publisher is not None: self.fall_publisher.publish(ros_boxes)", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "import Detection2DArray from sensor_msgs.msg import Image as ROS_Image from opendr_bridge import ROSBridge from", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "bbox = BoundingBox(left=x, top=y, width=w, height=h, name=0) bboxes.data.append(bbox) cv2.rectangle(image, (x, y), (x +", "in detections: fallen = detection[0].data pose = detection[2] if fallen == 1: color", "python # Copyright 2020-2022 OpenDR European Project # # Licensed under the Apache", "fallen person\", (5, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 1, cv2.LINE_AA) # Convert detected boxes", "FallDetectionNode: def __init__(self, input_image_topic=\"/usb_cam/image_raw\", output_image_topic=\"/opendr/image_fall_annotated\", fall_annotations_topic=\"/opendr/falls\", device=\"cuda\"): \"\"\" Creates a ROS Node for", "h = get_bbox(pose) bbox = BoundingBox(left=x, top=y, width=w, height=h, name=0) bboxes.data.append(bbox) cv2.rectangle(image, (x,", "self.image_publisher = None if fall_annotations_topic is not None: self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10)", "See the License for the specific language governing permissions and # limitations under", "and # limitations under the License. 
import rospy import torch import cv2 from", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "publishing annotated image) :type output_image_topic: str :param fall_annotations_topic: Topic to which we are", "topics :param data: input message :type data: sensor_msgs.msg.Image \"\"\" # Convert sensor_msgs.msg.Image into", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "ROS_Image, self.callback) rospy.loginfo(\"Fall detection node started!\") rospy.spin() def callback(self, data): \"\"\" Callback that", "detection detections = self.fall_detector.infer(image) # Get an OpenCV image back image = image.opencv()", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "opendr.perception.pose_estimation import LightweightOpenPoseLearner from opendr.perception.fall_detection import FallDetectorLearner from opendr.engine.data import Image from opendr.engine.target", "is not None: self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10) else: self.image_publisher = None if", "fall_annotations_topic: str :param device: device on which we are running inference ('cpu' or", "fall detection :param input_image_topic: Topic from which we are reading the input image", "= BoundingBox(left=x, top=y, width=w, height=h, name=0) bboxes.data.append(bbox) cv2.rectangle(image, (x, y), (x + w,", "FallDetectorLearner(self.pose_estimator) def listen(self): \"\"\" Start the node and begin processing input data \"\"\"", "self.bridge.to_ros_boxes(bboxes) if self.fall_publisher is not None: self.fall_publisher.publish(ros_boxes) if self.image_publisher is not None: message", "are publishing the annotated image (if None, we are not publishing annotated image)", "an OpenCV image back image = image.opencv() bboxes = BoundingBoxList([]) for detection in", "= LightweightOpenPoseLearner(device=device, num_refinement_stages=2, mobilenet_use_stride=False, half_precision=False) self.pose_estimator.download(path=\".\", verbose=True) self.pose_estimator.load(\"openpose_default\") self.fall_detector = FallDetectorLearner(self.pose_estimator) def listen(self):", "not found. Using CPU instead.\") device = 'cpu' except: device = 'cpu' fall_detection_node", "self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=2, mobilenet_use_stride=False, half_precision=False) self.pose_estimator.download(path=\".\", verbose=True) self.pose_estimator.load(\"openpose_default\") self.fall_detector = FallDetectorLearner(self.pose_estimator) def", "the specific language governing permissions and # limitations under the License. 
import rospy", "input image :type input_image_topic: str :param output_image_topic: Topic to which we are publishing", "data: input message :type data: sensor_msgs.msg.Image \"\"\" # Convert sensor_msgs.msg.Image into OpenDR Image", "are publishing the annotations (if None, we are not publishing annotated fall annotations)", "not None: self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10) else: self.image_publisher = None if fall_annotations_topic", "w, h = get_bbox(pose) bbox = BoundingBox(left=x, top=y, width=w, height=h, name=0) bboxes.data.append(bbox) cv2.rectangle(image,", "if self.fall_publisher is not None: self.fall_publisher.publish(ros_boxes) if self.image_publisher is not None: message =", "else: self.image_publisher = None if fall_annotations_topic is not None: self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray,", "Version 2.0 (the \"License\"); # you may not use this file except in", "the annotated image (if None, we are not publishing annotated image) :type output_image_topic:", "except in compliance with the License. # You may obtain a copy of", "cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 1, cv2.LINE_AA) # Convert detected boxes to ROS type and", "the device for running the try: if torch.cuda.is_available(): print(\"GPU found.\") device = 'cuda'", "device on which we are running inference ('cpu' or 'cuda') :type device: str", "rospy.Publisher(output_image_topic, ROS_Image, queue_size=10) else: self.image_publisher = None if fall_annotations_topic is not None: self.fall_publisher", "color, 2) cv2.putText(image, \"Detected fallen person\", (5, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 1, cv2.LINE_AA)", "opendr.perception.fall_detection import FallDetectorLearner from opendr.engine.data import Image from opendr.engine.target import BoundingBox, BoundingBoxList class", "person\", (5, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 1, cv2.LINE_AA) # Convert detected boxes to", "import FallDetectorLearner from opendr.engine.data import Image from opendr.engine.target import BoundingBox, BoundingBoxList class FallDetectionNode:", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "== '__main__': # Select the device for running the try: if torch.cuda.is_available(): print(\"GPU", "which we are publishing the annotations (if None, we are not publishing annotated", "output_image_topic=\"/opendr/image_fall_annotated\", fall_annotations_topic=\"/opendr/falls\", device=\"cuda\"): \"\"\" Creates a ROS Node for fall detection :param input_image_topic:", "opendr.engine.target import BoundingBox, BoundingBoxList class FallDetectionNode: def __init__(self, input_image_topic=\"/usb_cam/image_raw\", output_image_topic=\"/opendr/image_fall_annotated\", fall_annotations_topic=\"/opendr/falls\", device=\"cuda\"): \"\"\"", "fallen = detection[0].data pose = detection[2] if fallen == 1: color = (0,", "which we are running inference ('cpu' or 'cuda') :type device: str \"\"\" if", "ROSBridge from opendr.perception.pose_estimation import get_bbox from opendr.perception.pose_estimation import LightweightOpenPoseLearner from opendr.perception.fall_detection import FallDetectorLearner", "device: device on which we are running inference ('cpu' or 'cuda') :type device:", "publishing the annotations (if None, we are not publishing annotated fall annotations) :type", "cv2.putText(image, \"Detected fallen person\", (5, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 1, cv2.LINE_AA) # Convert", "from opendr.perception.pose_estimation import get_bbox from opendr.perception.pose_estimation import LightweightOpenPoseLearner from opendr.perception.fall_detection import FallDetectorLearner from", "= rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10) else: self.fall_publisher = None self.input_image_topic = input_image_topic self.bridge =", "estimation self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=2, mobilenet_use_stride=False, half_precision=False) self.pose_estimator.download(path=\".\", verbose=True) self.pose_estimator.load(\"openpose_default\") self.fall_detector = FallDetectorLearner(self.pose_estimator)", "None: self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10) else: self.image_publisher = None if fall_annotations_topic is", "('cpu' or 'cuda') :type device: str \"\"\" if output_image_topic is not None: self.image_publisher", "get_bbox from opendr.perception.pose_estimation import LightweightOpenPoseLearner from opendr.perception.fall_detection import FallDetectorLearner from opendr.engine.data import Image", "we are publishing the annotated image (if None, we are not publishing annotated", "self.bridge.from_ros_image(data, encoding='bgr8') # Run fall detection detections = self.fall_detector.infer(image) # Get an OpenCV", "Run fall detection detections = self.fall_detector.infer(image) # Get an OpenCV image back image", "None if fall_annotations_topic is not None: self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10) else: self.fall_publisher", "# Convert detected boxes to ROS type and publish ros_boxes = self.bridge.to_ros_boxes(bboxes) if", "image.opencv() bboxes = BoundingBoxList([]) for detection in detections: fallen = detection[0].data pose =", "cv2 from vision_msgs.msg import Detection2DArray from sensor_msgs.msg import Image as ROS_Image from opendr_bridge", "to which we are publishing the annotated image (if None, we are not", "self.fall_detector.infer(image) # Get an OpenCV image back image = image.opencv() bboxes = BoundingBoxList([])", "from 
import os, argparse
from argparse import RawTextHelpFormatter
import gwide.methods as gtm
import gwide.Classes.HittableClass as ghc


def hittable():
    ## option parser
    usage = "For more options type -h"
    description = "Downstream analysis on hittables created with pyReadCounter. Choose the type of analysis. " \
                  "Usage: create hittables using pyReadCounter, then run the script in the folder containing the hittables."
    parser = argparse.ArgumentParser(usage=usage, description=description)
    # functions
    parser.add_argument('--output', required=True, dest="function", choices=['correlation', 'count', 'piechart', 'classes'],
                        help='REQUIRED. Calculate "correlations"; '
                             '"count" hittables for further analysis, ideal for working with multiple experiments; '
                             'plot "piechart"s for hittable classes')
    # parser for input file options
    files = parser.add_argument_group('Input file options')
    files.add_argument("-g", dest="gtf_file", help="Provide the path to your gtf file.", type=str, default=None)
    files.add_argument("--stdin", dest="stdin", action="store_true",
                       help="Use standard input instead of ./*hittable*. Default: False", default=False)
    # universal options
    universal = parser.add_argument_group('universal options')
    universal.add_argument("-n", dest="normalized", action="store_true",
                           help="Use when you want to work on data normalized 'reads per Million'. Default: False",
                           default=False)
    universal.add_argument("-w", dest="whole_name", action="store_true",
                           help="By default the script takes 'a_b_c' from a_b_c_hittable_reads.txt as the experiment "
                                "name. Use this option if your file names do not fit this pattern. Default: False",
                           default=False)
    universal.add_argument("-p", dest="out_prefix", type=str, help="Prefix for output files.", default=None)
    # parser specific for counts
    count_group = parser.add_argument_group("counts options")
    count_group.add_argument("--rpkm", dest="rpkm", action="store_true",
                             help="Use RPKM instead of hits. Default: False", default=False)
    # parser specific for correlations
    corr_group = parser.add_argument_group("correlation options")
    corr_group.add_argument("-c", dest="gene_class", action="store_true",
                            help="Calculate the Pearson coefficient for different classes separately. Default: False",
                            default=False)
    corr_group.add_argument("-o", dest="output", choices=["p", "s", "k", "a"],
                            help="Select from the following options: p - Pearson (standard correlation coefficient); "
                                 "s - Spearman rank correlation; k - Kendall Tau correlation coefficient; "
                                 "a - all at once", default="p")
    # parser specific for piecharts
    piechart_group = parser.add_argument_group("piechart options")
    piechart_group.add_argument("-s", "--single", dest="print_single", help="Print hittables in single files",
                                action="store_true", default=False)
    options = parser.parse_args()

    ## Creating HittableClass object
    data = ghc.HittableClass(gtf=gtm.getGTF(options.gtf_file), whole_name=options.whole_name,
                             n_rpM=options.normalized, out_prefix=options.out_prefix, read_stdin=options.stdin)

    # running the chosen function
    if options.function == 'correlation':
        data.correlation(output=options.output, gene_class=options.gene_class)
    elif options.function == 'count':
        data.count(normalize=options.normalized, use_RPKM=options.rpkm)
    elif options.function == 'piechart':
        data.plot(print_single=options.print_single)
    elif options.function == 'classes':
        data.classes_to_tab()
gwide.Classes.HittableClass as ghc def hittable(): ## option parser", "for input files options files = parser.add_argument_group('Input file options') files.add_argument(\"-g\", dest=\"gtf_file\", help=\"Provide the", "./*hittable* Default: False\", default=False) # universal options universal = parser.add_argument_group('universal options') universal.add_argument(\"-n\", dest=\"normalized\",", "# universal options universal = parser.add_argument_group('universal options') universal.add_argument(\"-n\", dest=\"normalized\", action=\"store_true\", help=\"Use when you", "as ghc def hittable(): ## option parser usage = \"For more options type", "Usage: create hittables using pyReadCounter then run script in the folder containing hittables\"", "work with multiple experiments; ' 'Plot \"piechart\"s for hittable classes') # parser for", "pyReadCounter. Chose type of analysys Usage: create hittables using pyReadCounter then run script", "per Milion'. Default: False\", default=False) universal.add_argument(\"-w\", dest=\"whole_name\", action=\"store_true\", help=\"As defauls scripts takes 'a_b_c'", "Use this option if your file names do not suit to this pattern.", "in the folder containing hittables\" parser = argparse.ArgumentParser(usage=usage, description=description) #functions parser.add_argument('--output', required=True, dest=\"function\",", "'piechart', 'classes'], help='REQUIRED, Calculate \"correlations\"; ' '\"count\" hittables for further analysis. Ideal to", "Default: False\", default=False) # parser specific for correlations corr_group = parser.add_argument_group(\"correlation options\") corr_group.add_argument(\"-c\",", "= \"For more options type -h\" description = \"Downstream analysis on hittables crated", "Calculate \"correlations\"; ' '\"count\" hittables for further analysis. Ideal to work with multiple", "help=\"Calculate Pearson coefficient for different classes separately. Default: False\", default=False) corr_group.add_argument(\"-o\", dest=\"output\", choices=[\"p\",", "s - Spearman rank correlation; k - Kendall Tau correlation coefficient; a -", "Kendall Tau correlation coefficient; a - all at once\", default=\"p\") #parser specific for", "usage = \"For more options type -h\" description = \"Downstream analysis on hittables", "work on data normalized 'reads per Milion'. Default: False\", default=False) universal.add_argument(\"-w\", dest=\"whole_name\", action=\"store_true\",", "data.count(normalize=options.normalized, use_RPKM=options.rpkm) elif options.function == 'piechart': data.plot(print_single=options.print_single) elif options.function == 'classes': data.classes_to_tab() print", "\"a\"], help=\"Select from following options: p - Pearson (standard correlation coefficient); s -", "names do not suit to this pattern. 
Default: False\", default=False) universal.add_argument(\"-p\", dest=\"out_prefix\", type=str,", "elif options.function == 'count': data.count(normalize=options.normalized, use_RPKM=options.rpkm) elif options.function == 'piechart': data.plot(print_single=options.print_single) elif options.function", "use_RPKM=options.rpkm) elif options.function == 'piechart': data.plot(print_single=options.print_single) elif options.function == 'classes': data.classes_to_tab() print \"Done.\"", "correlations corr_group = parser.add_argument_group(\"correlation options\") corr_group.add_argument(\"-c\", dest=\"gene_class\", action=\"store_true\", help=\"Calculate Pearson coefficient for different", "with multiple experiments; ' 'Plot \"piechart\"s for hittable classes') # parser for input", "folder containing hittables\" parser = argparse.ArgumentParser(usage=usage, description=description) #functions parser.add_argument('--output', required=True, dest=\"function\", choices=['correlation', 'count',", "universal = parser.add_argument_group('universal options') universal.add_argument(\"-n\", dest=\"normalized\", action=\"store_true\", help=\"Use when you want to work", "parser.add_argument_group('universal options') universal.add_argument(\"-n\", dest=\"normalized\", action=\"store_true\", help=\"Use when you want to work on data", "rank correlation; k - Kendall Tau correlation coefficient; a - all at once\",", "scripts takes 'a_b_c' from a_b_c_hittable_reads.txt as experiment name. Use this option if your", "classes separately. Default: False\", default=False) corr_group.add_argument(\"-o\", dest=\"output\", choices=[\"p\", \"s\", \"k\", \"a\"], help=\"Select from", "False\", default=False) universal.add_argument(\"-w\", dest=\"whole_name\", action=\"store_true\", help=\"As defauls scripts takes 'a_b_c' from a_b_c_hittable_reads.txt as", "argparse import RawTextHelpFormatter import gwide.methods as gtm import gwide.Classes.HittableClass as ghc def hittable():", "defauls scripts takes 'a_b_c' from a_b_c_hittable_reads.txt as experiment name. Use this option if", "output files.\", default=None) # parser specific for counts corr_group = parser.add_argument_group(\"counts options\") corr_group.add_argument(\"--rpkm\",", "hits. Default: False\", default=False) # parser specific for correlations corr_group = parser.add_argument_group(\"correlation options\")", "<reponame>tturowski/gwide<gh_stars>1-10 #!/usr/bin/env python import os, argparse from argparse import RawTextHelpFormatter import gwide.methods as", "help=\"As defauls scripts takes 'a_b_c' from a_b_c_hittable_reads.txt as experiment name. Use this option", "object data = ghc.HittableClass(gtf=gtm.getGTF(options.gtf_file), whole_name=options.whole_name, n_rpM=options.normalized, out_prefix=options.out_prefix, read_stdin=options.stdin) #running chosen function if options.function", "action=\"store_true\", default=False) options = parser.parse_args() ## Creating HittableClass object data = ghc.HittableClass(gtf=gtm.getGTF(options.gtf_file), whole_name=options.whole_name,", "coefficient for different classes separately. Default: False\", default=False) corr_group.add_argument(\"-o\", dest=\"output\", choices=[\"p\", \"s\", \"k\",", "'classes'], help='REQUIRED, Calculate \"correlations\"; ' '\"count\" hittables for further analysis. Ideal to work", "dest=\"function\", choices=['correlation', 'count', 'piechart', 'classes'], help='REQUIRED, Calculate \"correlations\"; ' '\"count\" hittables for further", "for further analysis. 
Ideal to work with multiple experiments; ' 'Plot \"piechart\"s for", "action=\"store_true\", help=\"Use standard input instead ./*hittable* Default: False\", default=False) # universal options universal", "parser specific for counts corr_group = parser.add_argument_group(\"counts options\") corr_group.add_argument(\"--rpkm\", dest=\"rpkm\", action=\"store_true\", help=\"Use RPKM", "crated by pyReadCounter. Chose type of analysys Usage: create hittables using pyReadCounter then", "multiple experiments; ' 'Plot \"piechart\"s for hittable classes') # parser for input files", "- Spearman rank correlation; k - Kendall Tau correlation coefficient; a - all", "= parser.add_argument_group(\"piechart options\") piechart_group.add_argument(\"-s\", \"--single\", dest=\"print_single\", help=\"Print hittables in single files\", action=\"store_true\", default=False)", "False\", default=False) universal.add_argument(\"-p\", dest=\"out_prefix\", type=str, help=\"Prefix for output files.\", default=None) # parser specific", "options') files.add_argument(\"-g\", dest=\"gtf_file\", help=\"Provide the path to your gtf file.\", type=str, default=None) files.add_argument(\"--stdin\",", "as gtm import gwide.Classes.HittableClass as ghc def hittable(): ## option parser usage =", "\"s\", \"k\", \"a\"], help=\"Select from following options: p - Pearson (standard correlation coefficient);", "= \"Downstream analysis on hittables crated by pyReadCounter. Chose type of analysys Usage:", "help=\"Provide the path to your gtf file.\", type=str, default=None) files.add_argument(\"--stdin\", dest=\"stdin\", action=\"store_true\", help=\"Use", "correlation coefficient); s - Spearman rank correlation; k - Kendall Tau correlation coefficient;", "\"k\", \"a\"], help=\"Select from following options: p - Pearson (standard correlation coefficient); s", "dest=\"normalized\", action=\"store_true\", help=\"Use when you want to work on data normalized 'reads per", "specific for counts corr_group = parser.add_argument_group(\"counts options\") corr_group.add_argument(\"--rpkm\", dest=\"rpkm\", action=\"store_true\", help=\"Use RPKM instead", "type of analysys Usage: create hittables using pyReadCounter then run script in the", "' '\"count\" hittables for further analysis. Ideal to work with multiple experiments; '", "RawTextHelpFormatter import gwide.methods as gtm import gwide.Classes.HittableClass as ghc def hittable(): ## option", "options.function == 'count': data.count(normalize=options.normalized, use_RPKM=options.rpkm) elif options.function == 'piechart': data.plot(print_single=options.print_single) elif options.function ==", "a_b_c_hittable_reads.txt as experiment name. Use this option if your file names do not", "\"Downstream analysis on hittables crated by pyReadCounter. Chose type of analysys Usage: create", "dest=\"gene_class\", action=\"store_true\", help=\"Calculate Pearson coefficient for different classes separately. 
Default: False\", default=False) corr_group.add_argument(\"-o\",", "parser.parse_args() ## Creating HittableClass object data = ghc.HittableClass(gtf=gtm.getGTF(options.gtf_file), whole_name=options.whole_name, n_rpM=options.normalized, out_prefix=options.out_prefix, read_stdin=options.stdin) #running", "specific for correlations corr_group = parser.add_argument_group(\"correlation options\") corr_group.add_argument(\"-c\", dest=\"gene_class\", action=\"store_true\", help=\"Calculate Pearson coefficient", "## Creating HittableClass object data = ghc.HittableClass(gtf=gtm.getGTF(options.gtf_file), whole_name=options.whole_name, n_rpM=options.normalized, out_prefix=options.out_prefix, read_stdin=options.stdin) #running chosen", "= parser.add_argument_group(\"counts options\") corr_group.add_argument(\"--rpkm\", dest=\"rpkm\", action=\"store_true\", help=\"Use RPKM instead of hits. Default: False\",", "help=\"Print hittables in single files\", action=\"store_true\", default=False) options = parser.parse_args() ## Creating HittableClass", "== 'count': data.count(normalize=options.normalized, use_RPKM=options.rpkm) elif options.function == 'piechart': data.plot(print_single=options.print_single) elif options.function == 'classes':", "gene_class=options.gene_class) elif options.function == 'count': data.count(normalize=options.normalized, use_RPKM=options.rpkm) elif options.function == 'piechart': data.plot(print_single=options.print_single) elif", "out_prefix=options.out_prefix, read_stdin=options.stdin) #running chosen function if options.function == 'correlation': data.correlation(output=options.output, gene_class=options.gene_class) elif options.function", "want to work on data normalized 'reads per Milion'. Default: False\", default=False) universal.add_argument(\"-w\",", "- Pearson (standard correlation coefficient); s - Spearman rank correlation; k - Kendall", "containing hittables\" parser = argparse.ArgumentParser(usage=usage, description=description) #functions parser.add_argument('--output', required=True, dest=\"function\", choices=['correlation', 'count', 'piechart',", "all at once\", default=\"p\") #parser specific for piecharts piechart_group = parser.add_argument_group(\"piechart options\") piechart_group.add_argument(\"-s\",", "gwide.methods as gtm import gwide.Classes.HittableClass as ghc def hittable(): ## option parser usage", "help=\"Use when you want to work on data normalized 'reads per Milion'. Default:", "by pyReadCounter. 
Chose type of analysys Usage: create hittables using pyReadCounter then run", "universal.add_argument(\"-p\", dest=\"out_prefix\", type=str, help=\"Prefix for output files.\", default=None) # parser specific for counts", "once\", default=\"p\") #parser specific for piecharts piechart_group = parser.add_argument_group(\"piechart options\") piechart_group.add_argument(\"-s\", \"--single\", dest=\"print_single\",", "specific for piecharts piechart_group = parser.add_argument_group(\"piechart options\") piechart_group.add_argument(\"-s\", \"--single\", dest=\"print_single\", help=\"Print hittables in", "Tau correlation coefficient; a - all at once\", default=\"p\") #parser specific for piecharts", "#parser specific for piecharts piechart_group = parser.add_argument_group(\"piechart options\") piechart_group.add_argument(\"-s\", \"--single\", dest=\"print_single\", help=\"Print hittables", "more options type -h\" description = \"Downstream analysis on hittables crated by pyReadCounter.", "run script in the folder containing hittables\" parser = argparse.ArgumentParser(usage=usage, description=description) #functions parser.add_argument('--output',", "Default: False\", default=False) # universal options universal = parser.add_argument_group('universal options') universal.add_argument(\"-n\", dest=\"normalized\", action=\"store_true\",", "options') universal.add_argument(\"-n\", dest=\"normalized\", action=\"store_true\", help=\"Use when you want to work on data normalized", "your file names do not suit to this pattern. Default: False\", default=False) universal.add_argument(\"-p\",", "universal.add_argument(\"-n\", dest=\"normalized\", action=\"store_true\", help=\"Use when you want to work on data normalized 'reads", "script in the folder containing hittables\" parser = argparse.ArgumentParser(usage=usage, description=description) #functions parser.add_argument('--output', required=True,", "normalized 'reads per Milion'. Default: False\", default=False) universal.add_argument(\"-w\", dest=\"whole_name\", action=\"store_true\", help=\"As defauls scripts", "separately. Default: False\", default=False) corr_group.add_argument(\"-o\", dest=\"output\", choices=[\"p\", \"s\", \"k\", \"a\"], help=\"Select from following", "\"piechart\"s for hittable classes') # parser for input files options files = parser.add_argument_group('Input", "instead of hits. Default: False\", default=False) # parser specific for correlations corr_group =", "gtf file.\", type=str, default=None) files.add_argument(\"--stdin\", dest=\"stdin\", action=\"store_true\", help=\"Use standard input instead ./*hittable* Default:", "argparse from argparse import RawTextHelpFormatter import gwide.methods as gtm import gwide.Classes.HittableClass as ghc", "Default: False\", default=False) corr_group.add_argument(\"-o\", dest=\"output\", choices=[\"p\", \"s\", \"k\", \"a\"], help=\"Select from following options:", "options files = parser.add_argument_group('Input file options') files.add_argument(\"-g\", dest=\"gtf_file\", help=\"Provide the path to your", "Default: False\", default=False) universal.add_argument(\"-p\", dest=\"out_prefix\", type=str, help=\"Prefix for output files.\", default=None) # parser", "for different classes separately. 
Default: False\", default=False) corr_group.add_argument(\"-o\", dest=\"output\", choices=[\"p\", \"s\", \"k\", \"a\"],", "\"--single\", dest=\"print_single\", help=\"Print hittables in single files\", action=\"store_true\", default=False) options = parser.parse_args() ##", "corr_group = parser.add_argument_group(\"correlation options\") corr_group.add_argument(\"-c\", dest=\"gene_class\", action=\"store_true\", help=\"Calculate Pearson coefficient for different classes", "the path to your gtf file.\", type=str, default=None) files.add_argument(\"--stdin\", dest=\"stdin\", action=\"store_true\", help=\"Use standard", "on hittables crated by pyReadCounter. Chose type of analysys Usage: create hittables using", "experiments; ' 'Plot \"piechart\"s for hittable classes') # parser for input files options", "from argparse import RawTextHelpFormatter import gwide.methods as gtm import gwide.Classes.HittableClass as ghc def", "# parser specific for counts corr_group = parser.add_argument_group(\"counts options\") corr_group.add_argument(\"--rpkm\", dest=\"rpkm\", action=\"store_true\", help=\"Use", "parser usage = \"For more options type -h\" description = \"Downstream analysis on", "files.add_argument(\"-g\", dest=\"gtf_file\", help=\"Provide the path to your gtf file.\", type=str, default=None) files.add_argument(\"--stdin\", dest=\"stdin\",", "data.correlation(output=options.output, gene_class=options.gene_class) elif options.function == 'count': data.count(normalize=options.normalized, use_RPKM=options.rpkm) elif options.function == 'piechart': data.plot(print_single=options.print_single)", "Spearman rank correlation; k - Kendall Tau correlation coefficient; a - all at", "for hittable classes') # parser for input files options files = parser.add_argument_group('Input file", "coefficient); s - Spearman rank correlation; k - Kendall Tau correlation coefficient; a", "#running chosen function if options.function == 'correlation': data.correlation(output=options.output, gene_class=options.gene_class) elif options.function == 'count':", "' 'Plot \"piechart\"s for hittable classes') # parser for input files options files", "parser.add_argument_group(\"counts options\") corr_group.add_argument(\"--rpkm\", dest=\"rpkm\", action=\"store_true\", help=\"Use RPKM instead of hits. Default: False\", default=False)", "- all at once\", default=\"p\") #parser specific for piecharts piechart_group = parser.add_argument_group(\"piechart options\")", "at once\", default=\"p\") #parser specific for piecharts piechart_group = parser.add_argument_group(\"piechart options\") piechart_group.add_argument(\"-s\", \"--single\",", "options\") corr_group.add_argument(\"-c\", dest=\"gene_class\", action=\"store_true\", help=\"Calculate Pearson coefficient for different classes separately. Default: False\",", "type -h\" description = \"Downstream analysis on hittables crated by pyReadCounter. Chose type", "Chose type of analysys Usage: create hittables using pyReadCounter then run script in", "hittables in single files\", action=\"store_true\", default=False) options = parser.parse_args() ## Creating HittableClass object", "you want to work on data normalized 'reads per Milion'. 
Default: False\", default=False)", "hittable classes') # parser for input files options files = parser.add_argument_group('Input file options')", "ghc.HittableClass(gtf=gtm.getGTF(options.gtf_file), whole_name=options.whole_name, n_rpM=options.normalized, out_prefix=options.out_prefix, read_stdin=options.stdin) #running chosen function if options.function == 'correlation': data.correlation(output=options.output,", "to work on data normalized 'reads per Milion'. Default: False\", default=False) universal.add_argument(\"-w\", dest=\"whole_name\",", "dest=\"print_single\", help=\"Print hittables in single files\", action=\"store_true\", default=False) options = parser.parse_args() ## Creating", "for correlations corr_group = parser.add_argument_group(\"correlation options\") corr_group.add_argument(\"-c\", dest=\"gene_class\", action=\"store_true\", help=\"Calculate Pearson coefficient for", "input instead ./*hittable* Default: False\", default=False) # universal options universal = parser.add_argument_group('universal options')", "analysis on hittables crated by pyReadCounter. Chose type of analysys Usage: create hittables", "default=False) universal.add_argument(\"-w\", dest=\"whole_name\", action=\"store_true\", help=\"As defauls scripts takes 'a_b_c' from a_b_c_hittable_reads.txt as experiment", "corr_group.add_argument(\"-o\", dest=\"output\", choices=[\"p\", \"s\", \"k\", \"a\"], help=\"Select from following options: p - Pearson", "ghc def hittable(): ## option parser usage = \"For more options type -h\"", "type=str, default=None) files.add_argument(\"--stdin\", dest=\"stdin\", action=\"store_true\", help=\"Use standard input instead ./*hittable* Default: False\", default=False)", "input files options files = parser.add_argument_group('Input file options') files.add_argument(\"-g\", dest=\"gtf_file\", help=\"Provide the path", "if your file names do not suit to this pattern. Default: False\", default=False)", "path to your gtf file.\", type=str, default=None) files.add_argument(\"--stdin\", dest=\"stdin\", action=\"store_true\", help=\"Use standard input", "files.add_argument(\"--stdin\", dest=\"stdin\", action=\"store_true\", help=\"Use standard input instead ./*hittable* Default: False\", default=False) # universal", "as experiment name. Use this option if your file names do not suit", "suit to this pattern. Default: False\", default=False) universal.add_argument(\"-p\", dest=\"out_prefix\", type=str, help=\"Prefix for output", "files.\", default=None) # parser specific for counts corr_group = parser.add_argument_group(\"counts options\") corr_group.add_argument(\"--rpkm\", dest=\"rpkm\",", "False\", default=False) # parser specific for correlations corr_group = parser.add_argument_group(\"correlation options\") corr_group.add_argument(\"-c\", dest=\"gene_class\",", "corr_group.add_argument(\"-c\", dest=\"gene_class\", action=\"store_true\", help=\"Calculate Pearson coefficient for different classes separately. Default: False\", default=False)", "a - all at once\", default=\"p\") #parser specific for piecharts piechart_group = parser.add_argument_group(\"piechart", "# parser for input files options files = parser.add_argument_group('Input file options') files.add_argument(\"-g\", dest=\"gtf_file\",", "using pyReadCounter then run script in the folder containing hittables\" parser = argparse.ArgumentParser(usage=usage,", "corr_group = parser.add_argument_group(\"counts options\") corr_group.add_argument(\"--rpkm\", dest=\"rpkm\", action=\"store_true\", help=\"Use RPKM instead of hits. 
Default:", "the folder containing hittables\" parser = argparse.ArgumentParser(usage=usage, description=description) #functions parser.add_argument('--output', required=True, dest=\"function\", choices=['correlation',", "universal options universal = parser.add_argument_group('universal options') universal.add_argument(\"-n\", dest=\"normalized\", action=\"store_true\", help=\"Use when you want", "= parser.parse_args() ## Creating HittableClass object data = ghc.HittableClass(gtf=gtm.getGTF(options.gtf_file), whole_name=options.whole_name, n_rpM=options.normalized, out_prefix=options.out_prefix, read_stdin=options.stdin)", "\"For more options type -h\" description = \"Downstream analysis on hittables crated by", "default=False) # universal options universal = parser.add_argument_group('universal options') universal.add_argument(\"-n\", dest=\"normalized\", action=\"store_true\", help=\"Use when", "piecharts piechart_group = parser.add_argument_group(\"piechart options\") piechart_group.add_argument(\"-s\", \"--single\", dest=\"print_single\", help=\"Print hittables in single files\",", "default=False) # parser specific for correlations corr_group = parser.add_argument_group(\"correlation options\") corr_group.add_argument(\"-c\", dest=\"gene_class\", action=\"store_true\",", "False\", default=False) # universal options universal = parser.add_argument_group('universal options') universal.add_argument(\"-n\", dest=\"normalized\", action=\"store_true\", help=\"Use", "takes 'a_b_c' from a_b_c_hittable_reads.txt as experiment name. Use this option if your file", "single files\", action=\"store_true\", default=False) options = parser.parse_args() ## Creating HittableClass object data =", "import RawTextHelpFormatter import gwide.methods as gtm import gwide.Classes.HittableClass as ghc def hittable(): ##", "of analysys Usage: create hittables using pyReadCounter then run script in the folder", "hittables\" parser = argparse.ArgumentParser(usage=usage, description=description) #functions parser.add_argument('--output', required=True, dest=\"function\", choices=['correlation', 'count', 'piechart', 'classes'],", "= parser.add_argument_group('universal options') universal.add_argument(\"-n\", dest=\"normalized\", action=\"store_true\", help=\"Use when you want to work on", "to this pattern. Default: False\", default=False) universal.add_argument(\"-p\", dest=\"out_prefix\", type=str, help=\"Prefix for output files.\",", "for output files.\", default=None) # parser specific for counts corr_group = parser.add_argument_group(\"counts options\")", "default=None) # parser specific for counts corr_group = parser.add_argument_group(\"counts options\") corr_group.add_argument(\"--rpkm\", dest=\"rpkm\", action=\"store_true\",", "## option parser usage = \"For more options type -h\" description = \"Downstream", "parser.add_argument_group(\"correlation options\") corr_group.add_argument(\"-c\", dest=\"gene_class\", action=\"store_true\", help=\"Calculate Pearson coefficient for different classes separately. Default:", "choices=[\"p\", \"s\", \"k\", \"a\"], help=\"Select from following options: p - Pearson (standard correlation", "options type -h\" description = \"Downstream analysis on hittables crated by pyReadCounter. Chose", "experiment name. 
Use this option if your file names do not suit to", "gwide.Classes.HittableClass as ghc def hittable(): ## option parser usage = \"For more options", "'count': data.count(normalize=options.normalized, use_RPKM=options.rpkm) elif options.function == 'piechart': data.plot(print_single=options.print_single) elif options.function == 'classes': data.classes_to_tab()", "data = ghc.HittableClass(gtf=gtm.getGTF(options.gtf_file), whole_name=options.whole_name, n_rpM=options.normalized, out_prefix=options.out_prefix, read_stdin=options.stdin) #running chosen function if options.function ==", "options: p - Pearson (standard correlation coefficient); s - Spearman rank correlation; k", "parser for input files options files = parser.add_argument_group('Input file options') files.add_argument(\"-g\", dest=\"gtf_file\", help=\"Provide", "correlation coefficient; a - all at once\", default=\"p\") #parser specific for piecharts piechart_group", "piechart_group = parser.add_argument_group(\"piechart options\") piechart_group.add_argument(\"-s\", \"--single\", dest=\"print_single\", help=\"Print hittables in single files\", action=\"store_true\",", "action=\"store_true\", help=\"Use RPKM instead of hits. Default: False\", default=False) # parser specific for", "standard input instead ./*hittable* Default: False\", default=False) # universal options universal = parser.add_argument_group('universal", "for counts corr_group = parser.add_argument_group(\"counts options\") corr_group.add_argument(\"--rpkm\", dest=\"rpkm\", action=\"store_true\", help=\"Use RPKM instead of", "then run script in the folder containing hittables\" parser = argparse.ArgumentParser(usage=usage, description=description) #functions", "this option if your file names do not suit to this pattern. Default:", "dest=\"output\", choices=[\"p\", \"s\", \"k\", \"a\"], help=\"Select from following options: p - Pearson (standard", "gtm import gwide.Classes.HittableClass as ghc def hittable(): ## option parser usage = \"For", "python import os, argparse from argparse import RawTextHelpFormatter import gwide.methods as gtm import", "hittable(): ## option parser usage = \"For more options type -h\" description =", "help=\"Prefix for output files.\", default=None) # parser specific for counts corr_group = parser.add_argument_group(\"counts", "correlation; k - Kendall Tau correlation coefficient; a - all at once\", default=\"p\")", "options.function == 'correlation': data.correlation(output=options.output, gene_class=options.gene_class) elif options.function == 'count': data.count(normalize=options.normalized, use_RPKM=options.rpkm) elif options.function", "\"correlations\"; ' '\"count\" hittables for further analysis. Ideal to work with multiple experiments;", "-h\" description = \"Downstream analysis on hittables crated by pyReadCounter. Chose type of", "classes') # parser for input files options files = parser.add_argument_group('Input file options') files.add_argument(\"-g\",", "on data normalized 'reads per Milion'. Default: False\", default=False) universal.add_argument(\"-w\", dest=\"whole_name\", action=\"store_true\", help=\"As", "whole_name=options.whole_name, n_rpM=options.normalized, out_prefix=options.out_prefix, read_stdin=options.stdin) #running chosen function if options.function == 'correlation': data.correlation(output=options.output, gene_class=options.gene_class)", "import gwide.Classes.HittableClass as ghc def hittable(): ## option parser usage = \"For more", "data normalized 'reads per Milion'. 
Default: False\", default=False) universal.add_argument(\"-w\", dest=\"whole_name\", action=\"store_true\", help=\"As defauls", "description = \"Downstream analysis on hittables crated by pyReadCounter. Chose type of analysys", "chosen function if options.function == 'correlation': data.correlation(output=options.output, gene_class=options.gene_class) elif options.function == 'count': data.count(normalize=options.normalized,", "file options') files.add_argument(\"-g\", dest=\"gtf_file\", help=\"Provide the path to your gtf file.\", type=str, default=None)", "description=description) #functions parser.add_argument('--output', required=True, dest=\"function\", choices=['correlation', 'count', 'piechart', 'classes'], help='REQUIRED, Calculate \"correlations\"; '", "options = parser.parse_args() ## Creating HittableClass object data = ghc.HittableClass(gtf=gtm.getGTF(options.gtf_file), whole_name=options.whole_name, n_rpM=options.normalized, out_prefix=options.out_prefix,", "'reads per Milion'. Default: False\", default=False) universal.add_argument(\"-w\", dest=\"whole_name\", action=\"store_true\", help=\"As defauls scripts takes", "#!/usr/bin/env python import os, argparse from argparse import RawTextHelpFormatter import gwide.methods as gtm", "p - Pearson (standard correlation coefficient); s - Spearman rank correlation; k -", "hittables crated by pyReadCounter. Chose type of analysys Usage: create hittables using pyReadCounter", "function if options.function == 'correlation': data.correlation(output=options.output, gene_class=options.gene_class) elif options.function == 'count': data.count(normalize=options.normalized, use_RPKM=options.rpkm)", "action=\"store_true\", help=\"As defauls scripts takes 'a_b_c' from a_b_c_hittable_reads.txt as experiment name. Use this", "= argparse.ArgumentParser(usage=usage, description=description) #functions parser.add_argument('--output', required=True, dest=\"function\", choices=['correlation', 'count', 'piechart', 'classes'], help='REQUIRED, Calculate", "help='REQUIRED, Calculate \"correlations\"; ' '\"count\" hittables for further analysis. Ideal to work with", "parser.add_argument_group(\"piechart options\") piechart_group.add_argument(\"-s\", \"--single\", dest=\"print_single\", help=\"Print hittables in single files\", action=\"store_true\", default=False) options", "analysis. Ideal to work with multiple experiments; ' 'Plot \"piechart\"s for hittable classes')" ]
[ "10)] for i in range(3)] self.test = self.get_indexed_qa(raw_test) def set_mode(self, mode): self.mode =", "<filename>data_gen.py import re from glob import glob import numpy as np from torch.utils.data", "self.mode == 'train': return len(self.train[0]) elif self.mode == 'valid': return len(self.valid[0]) elif self.mode", "+ ['<EOS>'] for token in question: self.build_vocab(token) question = [self.QA.VOCAB[token] for token in", "max_context_sen_len = max_context_sen_len if max_context_sen_len > len(sen) else len(sen) max_context_len = min(max_context_len, 70)", "\"\", \"S\": \"\"} counter = 0 id_map = {} line = line.strip() line", "== 'train': return len(self.train[0]) elif self.mode == 'valid': return len(self.valid[0]) elif self.mode ==", "task[\"Q\"] = line[:idx] task[\"A\"] = tmp[1].strip() task[\"S\"] = [] # Supporting facts for", "num in tmp[2].split(): task[\"S\"].append(id_map[int(num.strip())]) tc = task.copy() tc['C'] = tc['C'].split('<line>')[:-1] tasks.append(tc) return tasks", "np.zeros((max_context_len, max_context_sen_len)) for j, sen in enumerate(_context): context[j] = np.pad(sen, (0, max_context_sen_len -", "counter = 0 id_map = {} line = line.strip() line = line.replace('.', '", "return train, test def build_vocab(raw_babi): lowered = raw_babi.lower() tokens = re.findall('[a-zA-Z]+', lowered) types", "'test' in path: with open(path, 'r') as fp: test = fp.read() return train,", "re.findall('[a-zA-Z]+', lowered) types = set(tokens) return types # adapted from https://github.com/YerevaNN/Dynamic-memory-networks-in-Theano/ def get_unindexed_qa(raw_babi):", "- len(question)), 'constant', constant_values=0) batch[i] = (context, question, answer) return default_collate(batch) class BabiDataset(Dataset):", "in question: self.build_vocab(token) question = [self.QA.VOCAB[token] for token in question] self.build_vocab(qa['A'].lower()) answer =", "[self.QA.VOCAB[token] for token in question] self.build_vocab(qa['A'].lower()) answer = self.QA.VOCAB[qa['A'].lower()] contexts.append(context) questions.append(question) answers.append(answer) return", "self.QA.VOCAB = {'<PAD>': 0, '<EOS>': 1} self.QA.IVOCAB = {0: '<PAD>', 1: '<EOS>'} self.train", "token in con: self.build_vocab(token) context = [[self.QA.VOCAB[token] for token in sentence] for sentence", "def build_vocab(raw_babi): lowered = raw_babi.lower() tokens = re.findall('[a-zA-Z]+', lowered) types = set(tokens) return", "task[\"C\"] += line + '<line>' id_map[id] = counter counter += 1 else: idx", "== '__main__': dset_train = BabiDataset(20, is_train=True) train_loader = DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate) for", "for sentence in context] question = qa['Q'].lower().split() + ['<EOS>'] for token in question:", "if not token in self.QA.VOCAB: next_index = len(self.QA.VOCAB) self.QA.VOCAB[token] = next_index self.QA.IVOCAB[next_index] =", "raw_test = get_raw_babi(task_id) self.QA = adict() self.QA.VOCAB = {'<PAD>': 0, '<EOS>': 1} self.QA.IVOCAB", "*av, **kav): dict.__init__(self, *av, **kav) self.__dict__ = self def pad_collate(batch): max_context_sen_len = float('-inf')", "for token in sentence] for sentence in context] question = qa['Q'].lower().split() + ['<EOS>']", "def get_unindexed_qa(raw_babi): tasks = [] task = None babi = raw_babi.strip().split('\\n') for i,", "in context: max_context_sen_len = max_context_sen_len if max_context_sen_len > len(sen) else len(sen) max_context_len =", "max_question_len - len(question)), 'constant', constant_values=0) batch[i] = (context, question, answer) 
return default_collate(batch) class", "__len__(self): if self.mode == 'train': return len(self.train[0]) elif self.mode == 'valid': return len(self.valid[0])", "tmp = line[idx + 1:].split('\\t') task[\"Q\"] = line[:idx] task[\"A\"] = tmp[1].strip() task[\"S\"] =", "def __len__(self): if self.mode == 'train': return len(self.train[0]) elif self.mode == 'valid': return", "question = [self.QA.VOCAB[token] for token in question] self.build_vocab(qa['A'].lower()) answer = self.QA.VOCAB[qa['A'].lower()] contexts.append(context) questions.append(question)", "fp.read() elif 'test' in path: with open(path, 'r') as fp: test = fp.read()", "in range(3)] self.test = self.get_indexed_qa(raw_test) def set_mode(self, mode): self.mode = mode def __len__(self):", "default_collate(batch) class BabiDataset(Dataset): def __init__(self, task_id, mode='train'): self.vocab_path = 'dataset/babi{}_vocab.pkl'.format(task_id) self.mode = mode", "\"\", \"A\": \"\", \"S\": \"\"} counter = 0 id_map = {} line =", "np from torch.utils.data import DataLoader from torch.utils.data.dataloader import default_collate from torch.utils.data.dataset import Dataset", "{0: '<PAD>', 1: '<EOS>'} self.train = self.get_indexed_qa(raw_train) self.valid = [self.train[i][int(-len(self.train[i]) / 10):] for", "self.mode = mode def __len__(self): if self.mode == 'train': return len(self.train[0]) elif self.mode", "is_train=True) train_loader = DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate) for batch_idx, data in enumerate(train_loader): contexts,", "+= 1 else: idx = line.find('?') tmp = line[idx + 1:].split('\\t') task[\"Q\"] =", "question, answer = elem _context = _context[-max_context_len:] context = np.zeros((max_context_len, max_context_sen_len)) for j,", "sentence in context] question = qa['Q'].lower().split() + ['<EOS>'] for token in question: self.build_vocab(token)", "= max_context_sen_len if max_context_sen_len > len(sen) else len(sen) max_context_len = min(max_context_len, 70) for", "import numpy as np from torch.utils.data import DataLoader from torch.utils.data.dataloader import default_collate from", "mode raw_train, raw_test = get_raw_babi(task_id) self.QA = adict() self.QA.VOCAB = {'<PAD>': 0, '<EOS>':", "= len(self.QA.VOCAB) self.QA.VOCAB[token] = next_index self.QA.IVOCAB[next_index] = token def get_raw_babi(taskid): paths = glob('data/en-10k/qa{}_*'.format(taskid))", "for qa in unindexed: context = [c.lower().split() + ['<EOS>'] for c in qa['C']]", "max_context_len = max_context_len if max_context_len > len(context) else len(context) max_question_len = max_question_len if", "tc['C'] = tc['C'].split('<line>')[:-1] tasks.append(tc) return tasks if __name__ == '__main__': dset_train = BabiDataset(20,", "else: idx = line.find('?') tmp = line[idx + 1:].split('\\t') task[\"Q\"] = line[:idx] task[\"A\"]", "min(max_context_len, 70) for i, elem in enumerate(batch): _context, question, answer = elem _context", "task = None babi = raw_babi.strip().split('\\n') for i, line in enumerate(babi): id =", "class adict(dict): def __init__(self, *av, **kav): dict.__init__(self, *av, **kav) self.__dict__ = self def", "self.vocab_path = 'dataset/babi{}_vocab.pkl'.format(task_id) self.mode = mode raw_train, raw_test = get_raw_babi(task_id) self.QA = adict()", "tc['C'].split('<line>')[:-1] tasks.append(tc) return tasks if __name__ == '__main__': dset_train = BabiDataset(20, is_train=True) train_loader", "> len(context) else len(context) max_question_len = max_question_len if max_question_len > len(question) else 
len(question)", "id_map[id] = counter counter += 1 else: idx = line.find('?') tmp = line[idx", "idx = line.find('?') tmp = line[idx + 1:].split('\\t') task[\"Q\"] = line[:idx] task[\"A\"] =", "= get_unindexed_qa(raw_babi) questions = [] contexts = [] answers = [] for qa", "{'<PAD>': 0, '<EOS>': 1} self.QA.IVOCAB = {0: '<PAD>', 1: '<EOS>'} self.train = self.get_indexed_qa(raw_train)", "= (context, question, answer) return default_collate(batch) class BabiDataset(Dataset): def __init__(self, task_id, mode='train'): self.vocab_path", "lowered) types = set(tokens) return types # adapted from https://github.com/YerevaNN/Dynamic-memory-networks-in-Theano/ def get_unindexed_qa(raw_babi): tasks", "counter += 1 else: idx = line.find('?') tmp = line[idx + 1:].split('\\t') task[\"Q\"]", "line.find('?') tmp = line[idx + 1:].split('\\t') task[\"Q\"] = line[:idx] task[\"A\"] = tmp[1].strip() task[\"S\"]", "return len(self.valid[0]) elif self.mode == 'test': return len(self.test[0]) def __getitem__(self, index): if self.mode", "types = set(tokens) return types # adapted from https://github.com/YerevaNN/Dynamic-memory-networks-in-Theano/ def get_unindexed_qa(raw_babi): tasks =", "in context] question = qa['Q'].lower().split() + ['<EOS>'] for token in question: self.build_vocab(token) question", "self.mode == 'valid': contexts, questions, answers = self.valid elif self.mode == 'test': contexts,", "next_index = len(self.QA.VOCAB) self.QA.VOCAB[token] = next_index self.QA.IVOCAB[next_index] = token def get_raw_babi(taskid): paths =", "raw_babi.lower() tokens = re.findall('[a-zA-Z]+', lowered) types = set(tokens) return types # adapted from", "from torch.utils.data import DataLoader from torch.utils.data.dataloader import default_collate from torch.utils.data.dataset import Dataset class", "if 'train' in path: with open(path, 'r') as fp: train = fp.read() elif", "line = line[line.find(' ') + 1:] # if not a question if line.find('?')", "len(self.train[i]) / 10)] for i in range(3)] self.test = self.get_indexed_qa(raw_test) def set_mode(self, mode):", "token in self.QA.VOCAB: next_index = len(self.QA.VOCAB) self.QA.VOCAB[token] = next_index self.QA.IVOCAB[next_index] = token def", "for elem in batch: context, question, _ = elem max_context_len = max_context_len if", "in enumerate(babi): id = int(line[0:line.find(' ')]) if id == 1: task = {\"C\":", "= float('-inf') for elem in batch: context, question, _ = elem max_context_len =", "\"\"} counter = 0 id_map = {} line = line.strip() line = line.replace('.',", "in context: for token in con: self.build_vocab(token) context = [[self.QA.VOCAB[token] for token in", "if line.find('?') == -1: task[\"C\"] += line + '<line>' id_map[id] = counter counter", "elem in batch: context, question, _ = elem max_context_len = max_context_len if max_context_len", "= 0 id_map = {} line = line.strip() line = line.replace('.', ' .", "token def get_raw_babi(taskid): paths = glob('data/en-10k/qa{}_*'.format(taskid)) for path in paths: if 'train' in", "with open(path, 'r') as fp: train = fp.read() elif 'test' in path: with", "counter counter += 1 else: idx = line.find('?') tmp = line[idx + 1:].split('\\t')", "len(self.valid[0]) elif self.mode == 'test': return len(self.test[0]) def __getitem__(self, index): if self.mode ==", "question: self.build_vocab(token) question = [self.QA.VOCAB[token] for token in question] self.build_vocab(qa['A'].lower()) answer = self.QA.VOCAB[qa['A'].lower()]", "line + '<line>' id_map[id] = counter counter += 1 else: idx = line.find('?')", "sentence] for sentence 
in context] question = qa['Q'].lower().split() + ['<EOS>'] for token in", "line[:idx] task[\"A\"] = tmp[1].strip() task[\"S\"] = [] # Supporting facts for num in", "answers[index] def get_indexed_qa(self, raw_babi): unindexed = get_unindexed_qa(raw_babi) questions = [] contexts = []", "\"Q\": \"\", \"A\": \"\", \"S\": \"\"} counter = 0 id_map = {} line", "= None babi = raw_babi.strip().split('\\n') for i, line in enumerate(babi): id = int(line[0:line.find('", "get_raw_babi(taskid): paths = glob('data/en-10k/qa{}_*'.format(taskid)) for path in paths: if 'train' in path: with", "{\"C\": \"\", \"Q\": \"\", \"A\": \"\", \"S\": \"\"} counter = 0 id_map =", "1 else: idx = line.find('?') tmp = line[idx + 1:].split('\\t') task[\"Q\"] = line[:idx]", "len(self.QA.VOCAB) self.QA.VOCAB[token] = next_index self.QA.IVOCAB[next_index] = token def get_raw_babi(taskid): paths = glob('data/en-10k/qa{}_*'.format(taskid)) for", "float('-inf') max_question_len = float('-inf') for elem in batch: context, question, _ = elem", "import re from glob import glob import numpy as np from torch.utils.data import", "next_index self.QA.IVOCAB[next_index] = token def get_raw_babi(taskid): paths = glob('data/en-10k/qa{}_*'.format(taskid)) for path in paths:", "con in context: for token in con: self.build_vocab(token) context = [[self.QA.VOCAB[token] for token", "= self.get_indexed_qa(raw_test) def set_mode(self, mode): self.mode = mode def __len__(self): if self.mode ==", "in enumerate(batch): _context, question, answer = elem _context = _context[-max_context_len:] context = np.zeros((max_context_len,", "shuffle=True, collate_fn=pad_collate) for batch_idx, data in enumerate(train_loader): contexts, questions, answers = data break", "context[j] = np.pad(sen, (0, max_context_sen_len - len(sen)), 'constant', constant_values=0) question = np.pad(question, (0,", "+ '<line>' id_map[id] = counter counter += 1 else: idx = line.find('?') tmp", "i, elem in enumerate(batch): _context, question, answer = elem _context = _context[-max_context_len:] context", "/ 10)] for i in range(3)] self.test = self.get_indexed_qa(raw_test) def set_mode(self, mode): self.mode", "'<EOS>': 1} self.QA.IVOCAB = {0: '<PAD>', 1: '<EOS>'} self.train = self.get_indexed_qa(raw_train) self.valid =", "return len(self.test[0]) def __getitem__(self, index): if self.mode == 'train': contexts, questions, answers =", "https://github.com/YerevaNN/Dynamic-memory-networks-in-Theano/ def get_unindexed_qa(raw_babi): tasks = [] task = None babi = raw_babi.strip().split('\\n') for", "-1: task[\"C\"] += line + '<line>' id_map[id] = counter counter += 1 else:", "= elem _context = _context[-max_context_len:] context = np.zeros((max_context_len, max_context_sen_len)) for j, sen in", "self.QA.VOCAB[token] = next_index self.QA.IVOCAB[next_index] = token def get_raw_babi(taskid): paths = glob('data/en-10k/qa{}_*'.format(taskid)) for path", "= DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate) for batch_idx, data in enumerate(train_loader): contexts, questions, answers", "'<PAD>', 1: '<EOS>'} self.train = self.get_indexed_qa(raw_train) self.valid = [self.train[i][int(-len(self.train[i]) / 10):] for i", "'__main__': dset_train = BabiDataset(20, is_train=True) train_loader = DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate) for batch_idx,", "= self.test return contexts[index], questions[index], answers[index] def get_indexed_qa(self, raw_babi): unindexed = get_unindexed_qa(raw_babi) questions", "path: with open(path, 'r') as fp: train 
= fp.read() elif 'test' in path:", "answer = self.QA.VOCAB[qa['A'].lower()] contexts.append(context) questions.append(question) answers.append(answer) return (contexts, questions, answers) def build_vocab(self, token):", "id_map = {} line = line.strip() line = line.replace('.', ' . ') line", "line.replace('.', ' . ') line = line[line.find(' ') + 1:] # if not", "= line[idx + 1:].split('\\t') task[\"Q\"] = line[:idx] task[\"A\"] = tmp[1].strip() task[\"S\"] = []", "= float('-inf') max_context_len = float('-inf') max_question_len = float('-inf') for elem in batch: context,", "self.mode == 'test': contexts, questions, answers = self.test return contexts[index], questions[index], answers[index] def", "__name__ == '__main__': dset_train = BabiDataset(20, is_train=True) train_loader = DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate)", "return len(self.train[0]) elif self.mode == 'valid': return len(self.valid[0]) elif self.mode == 'test': return", "path: with open(path, 'r') as fp: test = fp.read() return train, test def", "from torch.utils.data.dataloader import default_collate from torch.utils.data.dataset import Dataset class adict(dict): def __init__(self, *av,", "self.QA = adict() self.QA.VOCAB = {'<PAD>': 0, '<EOS>': 1} self.QA.IVOCAB = {0: '<PAD>',", "[] answers = [] for qa in unindexed: context = [c.lower().split() + ['<EOS>']", "1: '<EOS>'} self.train = self.get_indexed_qa(raw_train) self.valid = [self.train[i][int(-len(self.train[i]) / 10):] for i in", "contexts.append(context) questions.append(question) answers.append(answer) return (contexts, questions, answers) def build_vocab(self, token): if not token", "len(sen) else len(sen) max_context_len = min(max_context_len, 70) for i, elem in enumerate(batch): _context,", "= fp.read() elif 'test' in path: with open(path, 'r') as fp: test =", "70) for i, elem in enumerate(batch): _context, question, answer = elem _context =", "> len(question) else len(question) for sen in context: max_context_sen_len = max_context_sen_len if max_context_sen_len", "answers = self.test return contexts[index], questions[index], answers[index] def get_indexed_qa(self, raw_babi): unindexed = get_unindexed_qa(raw_babi)", "raw_babi.strip().split('\\n') for i, line in enumerate(babi): id = int(line[0:line.find(' ')]) if id ==", "BabiDataset(20, is_train=True) train_loader = DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate) for batch_idx, data in enumerate(train_loader):", "len(question) else len(question) for sen in context: max_context_sen_len = max_context_sen_len if max_context_sen_len >", "= _context[-max_context_len:] context = np.zeros((max_context_len, max_context_sen_len)) for j, sen in enumerate(_context): context[j] =", "# if not a question if line.find('?') == -1: task[\"C\"] += line +", "'train': return len(self.train[0]) elif self.mode == 'valid': return len(self.valid[0]) elif self.mode == 'test':", "= set(tokens) return types # adapted from https://github.com/YerevaNN/Dynamic-memory-networks-in-Theano/ def get_unindexed_qa(raw_babi): tasks = []", "tasks = [] task = None babi = raw_babi.strip().split('\\n') for i, line in", "test def build_vocab(raw_babi): lowered = raw_babi.lower() tokens = re.findall('[a-zA-Z]+', lowered) types = set(tokens)", "range(3)] self.train = [self.train[i][:int(9 * len(self.train[i]) / 10)] for i in range(3)] self.test", "[] task = None babi = raw_babi.strip().split('\\n') for i, line in enumerate(babi): id", "= float('-inf') max_question_len = float('-inf') for elem in 
# bAbI question-answering dataset loader (PyTorch).
import re
from glob import glob

import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.dataset import Dataset


class adict(dict):
    """Dictionary whose keys are also readable as attributes."""

    def __init__(self, *av, **kav):
        dict.__init__(self, *av, **kav)
        self.__dict__ = self


def pad_collate(batch):
    """Pad every sample in the batch to a common shape, then stack.

    Contexts become (max_context_len, max_context_sen_len) arrays and
    questions become vectors of length max_question_len; contexts longer
    than 70 sentences are truncated to their most recent 70.
    """
    max_context_sen_len = float('-inf')
    max_context_len = float('-inf')
    max_question_len = float('-inf')
    for elem in batch:
        context, question, _ = elem
        max_context_len = max(max_context_len, len(context))
        max_question_len = max(max_question_len, len(question))
        for sen in context:
            max_context_sen_len = max(max_context_sen_len, len(sen))
    max_context_len = min(max_context_len, 70)
    for i, elem in enumerate(batch):
        _context, question, answer = elem
        _context = _context[-max_context_len:]
        context = np.zeros((max_context_len, max_context_sen_len))
        for j, sen in enumerate(_context):
            context[j] = np.pad(sen, (0, max_context_sen_len - len(sen)),
                                'constant', constant_values=0)
        question = np.pad(question, (0, max_question_len - len(question)),
                          'constant', constant_values=0)
        batch[i] = (context, question, answer)
    return default_collate(batch)
class BabiDataset(Dataset):
    def __init__(self, task_id, mode='train'):
        self.vocab_path = 'dataset/babi{}_vocab.pkl'.format(task_id)
        self.mode = mode
        raw_train, raw_test = get_raw_babi(task_id)
        self.QA = adict()
        self.QA.VOCAB = {'<PAD>': 0, '<EOS>': 1}
        self.QA.IVOCAB = {0: '<PAD>', 1: '<EOS>'}
        # Hold out the last tenth of each field (contexts, questions,
        # answers) as a validation split; keep the first nine tenths
        # for training.
        self.train = self.get_indexed_qa(raw_train)
        self.valid = [self.train[i][int(-len(self.train[i]) / 10):] for i in range(3)]
        self.train = [self.train[i][:int(9 * len(self.train[i]) / 10)] for i in range(3)]
        self.test = self.get_indexed_qa(raw_test)

    def set_mode(self, mode):
        self.mode = mode

    def __len__(self):
        if self.mode == 'train':
            return len(self.train[0])
        elif self.mode == 'valid':
            return len(self.valid[0])
        elif self.mode == 'test':
            return len(self.test[0])

    def __getitem__(self, index):
        if self.mode == 'train':
            contexts, questions, answers = self.train
        elif self.mode == 'valid':
            contexts, questions, answers = self.valid
        elif self.mode == 'test':
            contexts, questions, answers = self.test
        return contexts[index], questions[index], answers[index]

    def get_indexed_qa(self, raw_babi):
        """Tokenize the raw bAbI text and map every token to a vocab id."""
        unindexed = get_unindexed_qa(raw_babi)
        questions = []
        contexts = []
        answers = []
        for qa in unindexed:
            context = [c.lower().split() + ['<EOS>'] for c in qa['C']]
            for con in context:
                for token in con:
                    self.build_vocab(token)
            context = [[self.QA.VOCAB[token] for token in sentence] for sentence in context]
            question = qa['Q'].lower().split() + ['<EOS>']
            for token in question:
                self.build_vocab(token)
            question = [self.QA.VOCAB[token] for token in question]
            self.build_vocab(qa['A'].lower())
            answer = self.QA.VOCAB[qa['A'].lower()]
            contexts.append(context)
            questions.append(question)
            answers.append(answer)
        return (contexts, questions, answers)

    def build_vocab(self, token):
        if token not in self.QA.VOCAB:
            next_index = len(self.QA.VOCAB)
            self.QA.VOCAB[token] = next_index
            self.QA.IVOCAB[next_index] = token


def get_raw_babi(taskid):
    paths = glob('data/en-10k/qa{}_*'.format(taskid))
    for path in paths:
        if 'train' in path:
            with open(path, 'r') as fp:
                train = fp.read()
        elif 'test' in path:
            with open(path, 'r') as fp:
                test = fp.read()
    return train, test


def build_vocab(raw_babi):
    """Return the set of word types occurring in the raw bAbI text."""
    lowered = raw_babi.lower()
    tokens = re.findall('[a-zA-Z]+', lowered)
    types = set(tokens)
    return types


# adapted from https://github.com/YerevaNN/Dynamic-memory-networks-in-Theano/
def get_unindexed_qa(raw_babi):
    tasks = []
    task = None
    babi = raw_babi.strip().split('\n')
    for i, line in enumerate(babi):
        id = int(line[0:line.find(' ')])
        if id == 1:
            # A line id of 1 starts a new story.
            task = {"C": "", "Q": "", "A": "", "S": ""}
            counter = 0
            id_map = {}
        line = line.strip()
        line = line.replace('.', ' . ')
        line = line[line.find(' ') + 1:]
        # if not a question
        if line.find('?') == -1:
            task["C"] += line + '<line>'
            id_map[id] = counter
            counter += 1
        else:
            idx = line.find('?')
            tmp = line[idx + 1:].split('\t')
            task["Q"] = line[:idx]
            task["A"] = tmp[1].strip()
            task["S"] = []  # Supporting facts
            for num in tmp[2].split():
                task["S"].append(id_map[int(num.strip())])
            tc = task.copy()
            tc['C'] = tc['C'].split('<line>')[:-1]
            tasks.append(tc)
    return tasks
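# get_unindexed_qa assumes the standard bAbI text layout: statement lines
# carry a running id, and a question line holds the question, the answer,
# and the supporting fact ids separated by tabs. A minimal sketch with
# made-up story lines (not taken from the dataset):
#
#   raw = ("1 Mary moved to the bathroom.\n"
#          "2 John went to the hallway.\n"
#          "3 Where is Mary?\tbathroom\t1")
#   get_unindexed_qa(raw)
#   # -> [{'C': ['Mary moved to the bathroom . ',
#   #            'John went to the hallway . '],
#   #      'Q': 'Where is Mary', 'A': 'bathroom', 'S': [0]}]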
"return default_collate(batch) class BabiDataset(Dataset): def __init__(self, task_id, mode='train'): self.vocab_path = 'dataset/babi{}_vocab.pkl'.format(task_id) self.mode =", "train_loader = DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate) for batch_idx, data in enumerate(train_loader): contexts, questions,", "id = int(line[0:line.find(' ')]) if id == 1: task = {\"C\": \"\", \"Q\":", "self.QA.IVOCAB = {0: '<PAD>', 1: '<EOS>'} self.train = self.get_indexed_qa(raw_train) self.valid = [self.train[i][int(-len(self.train[i]) /", "in tmp[2].split(): task[\"S\"].append(id_map[int(num.strip())]) tc = task.copy() tc['C'] = tc['C'].split('<line>')[:-1] tasks.append(tc) return tasks if", "set_mode(self, mode): self.mode = mode def __len__(self): if self.mode == 'train': return len(self.train[0])", "' . ') line = line[line.find(' ') + 1:] # if not a", "def set_mode(self, mode): self.mode = mode def __len__(self): if self.mode == 'train': return", "for token in question] self.build_vocab(qa['A'].lower()) answer = self.QA.VOCAB[qa['A'].lower()] contexts.append(context) questions.append(question) answers.append(answer) return (contexts,", "line = line.strip() line = line.replace('.', ' . ') line = line[line.find(' ')", "set(tokens) return types # adapted from https://github.com/YerevaNN/Dynamic-memory-networks-in-Theano/ def get_unindexed_qa(raw_babi): tasks = [] task", "adict(dict): def __init__(self, *av, **kav): dict.__init__(self, *av, **kav) self.__dict__ = self def pad_collate(batch):", "= mode raw_train, raw_test = get_raw_babi(task_id) self.QA = adict() self.QA.VOCAB = {'<PAD>': 0,", "elif self.mode == 'test': contexts, questions, answers = self.test return contexts[index], questions[index], answers[index]", "= [c.lower().split() + ['<EOS>'] for c in qa['C']] for con in context: for", "if id == 1: task = {\"C\": \"\", \"Q\": \"\", \"A\": \"\", \"S\":", "for i, line in enumerate(babi): id = int(line[0:line.find(' ')]) if id == 1:", "token): if not token in self.QA.VOCAB: next_index = len(self.QA.VOCAB) self.QA.VOCAB[token] = next_index self.QA.IVOCAB[next_index]", "answers = self.valid elif self.mode == 'test': contexts, questions, answers = self.test return", "elem _context = _context[-max_context_len:] context = np.zeros((max_context_len, max_context_sen_len)) for j, sen in enumerate(_context):", "+ 1:].split('\\t') task[\"Q\"] = line[:idx] task[\"A\"] = tmp[1].strip() task[\"S\"] = [] # Supporting", "= {0: '<PAD>', 1: '<EOS>'} self.train = self.get_indexed_qa(raw_train) self.valid = [self.train[i][int(-len(self.train[i]) / 10):]", "(0, max_question_len - len(question)), 'constant', constant_values=0) batch[i] = (context, question, answer) return default_collate(batch)", "adapted from https://github.com/YerevaNN/Dynamic-memory-networks-in-Theano/ def get_unindexed_qa(raw_babi): tasks = [] task = None babi =", "= raw_babi.strip().split('\\n') for i, line in enumerate(babi): id = int(line[0:line.find(' ')]) if id", "import Dataset class adict(dict): def __init__(self, *av, **kav): dict.__init__(self, *av, **kav) self.__dict__ =", "['<EOS>'] for token in question: self.build_vocab(token) question = [self.QA.VOCAB[token] for token in question]", "tmp[2].split(): task[\"S\"].append(id_map[int(num.strip())]) tc = task.copy() tc['C'] = tc['C'].split('<line>')[:-1] tasks.append(tc) return tasks if __name__", "= [self.train[i][:int(9 * len(self.train[i]) / 10)] for i in range(3)] self.test = self.get_indexed_qa(raw_test)", "build_vocab(raw_babi): lowered = raw_babi.lower() 
tokens = re.findall('[a-zA-Z]+', lowered) types = set(tokens) return types", "glob import glob import numpy as np from torch.utils.data import DataLoader from torch.utils.data.dataloader", "'dataset/babi{}_vocab.pkl'.format(task_id) self.mode = mode raw_train, raw_test = get_raw_babi(task_id) self.QA = adict() self.QA.VOCAB =", "= next_index self.QA.IVOCAB[next_index] = token def get_raw_babi(taskid): paths = glob('data/en-10k/qa{}_*'.format(taskid)) for path in", "train, test def build_vocab(raw_babi): lowered = raw_babi.lower() tokens = re.findall('[a-zA-Z]+', lowered) types =", "question, answer) return default_collate(batch) class BabiDataset(Dataset): def __init__(self, task_id, mode='train'): self.vocab_path = 'dataset/babi{}_vocab.pkl'.format(task_id)", "return (contexts, questions, answers) def build_vocab(self, token): if not token in self.QA.VOCAB: next_index", "self.get_indexed_qa(raw_train) self.valid = [self.train[i][int(-len(self.train[i]) / 10):] for i in range(3)] self.train = [self.train[i][:int(9", "= line.find('?') tmp = line[idx + 1:].split('\\t') task[\"Q\"] = line[:idx] task[\"A\"] = tmp[1].strip()", "def __init__(self, task_id, mode='train'): self.vocab_path = 'dataset/babi{}_vocab.pkl'.format(task_id) self.mode = mode raw_train, raw_test =", "self.build_vocab(token) context = [[self.QA.VOCAB[token] for token in sentence] for sentence in context] question", "class BabiDataset(Dataset): def __init__(self, task_id, mode='train'): self.vocab_path = 'dataset/babi{}_vocab.pkl'.format(task_id) self.mode = mode raw_train,", "questions = [] contexts = [] answers = [] for qa in unindexed:", "mode='train'): self.vocab_path = 'dataset/babi{}_vocab.pkl'.format(task_id) self.mode = mode raw_train, raw_test = get_raw_babi(task_id) self.QA =", "fp: test = fp.read() return train, test def build_vocab(raw_babi): lowered = raw_babi.lower() tokens", "= task.copy() tc['C'] = tc['C'].split('<line>')[:-1] tasks.append(tc) return tasks if __name__ == '__main__': dset_train", "= BabiDataset(20, is_train=True) train_loader = DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate) for batch_idx, data in", "[] for qa in unindexed: context = [c.lower().split() + ['<EOS>'] for c in", "fp.read() return train, test def build_vocab(raw_babi): lowered = raw_babi.lower() tokens = re.findall('[a-zA-Z]+', lowered)", "build_vocab(self, token): if not token in self.QA.VOCAB: next_index = len(self.QA.VOCAB) self.QA.VOCAB[token] = next_index", "[[self.QA.VOCAB[token] for token in sentence] for sentence in context] question = qa['Q'].lower().split() +", "re from glob import glob import numpy as np from torch.utils.data import DataLoader", "self.train = self.get_indexed_qa(raw_train) self.valid = [self.train[i][int(-len(self.train[i]) / 10):] for i in range(3)] self.train", "DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate) for batch_idx, data in enumerate(train_loader): contexts, questions, answers =", "+ 1:] # if not a question if line.find('?') == -1: task[\"C\"] +=", "= mode def __len__(self): if self.mode == 'train': return len(self.train[0]) elif self.mode ==", "with open(path, 'r') as fp: test = fp.read() return train, test def build_vocab(raw_babi):", "self.mode == 'test': return len(self.test[0]) def __getitem__(self, index): if self.mode == 'train': contexts,", "facts for num in tmp[2].split(): task[\"S\"].append(id_map[int(num.strip())]) tc = task.copy() tc['C'] = tc['C'].split('<line>')[:-1] tasks.append(tc)", "batch: context, question, _ = elem 
max_context_len = max_context_len if max_context_len > len(context)", "import default_collate from torch.utils.data.dataset import Dataset class adict(dict): def __init__(self, *av, **kav): dict.__init__(self,", "not token in self.QA.VOCAB: next_index = len(self.QA.VOCAB) self.QA.VOCAB[token] = next_index self.QA.IVOCAB[next_index] = token", "= 'dataset/babi{}_vocab.pkl'.format(task_id) self.mode = mode raw_train, raw_test = get_raw_babi(task_id) self.QA = adict() self.QA.VOCAB", "+= line + '<line>' id_map[id] = counter counter += 1 else: idx =", "= [] contexts = [] answers = [] for qa in unindexed: context", "babi = raw_babi.strip().split('\\n') for i, line in enumerate(babi): id = int(line[0:line.find(' ')]) if", "torch.utils.data.dataloader import default_collate from torch.utils.data.dataset import Dataset class adict(dict): def __init__(self, *av, **kav):", "questions, answers = self.test return contexts[index], questions[index], answers[index] def get_indexed_qa(self, raw_babi): unindexed =", "= self def pad_collate(batch): max_context_sen_len = float('-inf') max_context_len = float('-inf') max_question_len = float('-inf')", "sen in context: max_context_sen_len = max_context_sen_len if max_context_sen_len > len(sen) else len(sen) max_context_len", "np.pad(sen, (0, max_context_sen_len - len(sen)), 'constant', constant_values=0) question = np.pad(question, (0, max_question_len -", "BabiDataset(Dataset): def __init__(self, task_id, mode='train'): self.vocab_path = 'dataset/babi{}_vocab.pkl'.format(task_id) self.mode = mode raw_train, raw_test", ". ') line = line[line.find(' ') + 1:] # if not a question", "answer = elem _context = _context[-max_context_len:] context = np.zeros((max_context_len, max_context_sen_len)) for j, sen", "# adapted from https://github.com/YerevaNN/Dynamic-memory-networks-in-Theano/ def get_unindexed_qa(raw_babi): tasks = [] task = None babi", "== 'test': return len(self.test[0]) def __getitem__(self, index): if self.mode == 'train': contexts, questions,", "in sentence] for sentence in context] question = qa['Q'].lower().split() + ['<EOS>'] for token", "open(path, 'r') as fp: test = fp.read() return train, test def build_vocab(raw_babi): lowered", "0 id_map = {} line = line.strip() line = line.replace('.', ' . ')", "task[\"A\"] = tmp[1].strip() task[\"S\"] = [] # Supporting facts for num in tmp[2].split():", "questions[index], answers[index] def get_indexed_qa(self, raw_babi): unindexed = get_unindexed_qa(raw_babi) questions = [] contexts =", "enumerate(batch): _context, question, answer = elem _context = _context[-max_context_len:] context = np.zeros((max_context_len, max_context_sen_len))", "sen in enumerate(_context): context[j] = np.pad(sen, (0, max_context_sen_len - len(sen)), 'constant', constant_values=0) question", "context] question = qa['Q'].lower().split() + ['<EOS>'] for token in question: self.build_vocab(token) question =", "self.mode == 'train': contexts, questions, answers = self.train elif self.mode == 'valid': contexts,", "dict.__init__(self, *av, **kav) self.__dict__ = self def pad_collate(batch): max_context_sen_len = float('-inf') max_context_len =", "for token in question: self.build_vocab(token) question = [self.QA.VOCAB[token] for token in question] self.build_vocab(qa['A'].lower())" ]
[ "('python3', '3.6.4'), ]) def test_container_version(host, name, version): pkg = host.package(name) assert pkg.is_installed assert", "pkg = host.package(name) assert pkg.is_installed assert pkg.version.startswith(version) @pytest.mark.parametrize('name,version', [ ('Flask', '1.0.2'), ]) def", "def test_sshd_disabled(host): try: sshd = host.service('sshd') assert not sshd.is_running return except: return pytest.fail('sshd", "the test suite, destroy the container subprocess.check_call(['docker', 'rm', '-f', docker_id]) @pytest.mark.parametrize('name,version', [ ('python3',", "the tests; # scope='function' uses a new container per test function. @pytest.fixture(scope='session') def", "test_sshd_disabled(host): try: sshd = host.service('sshd') assert not sshd.is_running return except: return pytest.fail('sshd should", "per test function. @pytest.fixture(scope='session') def host(request): # build local ./Dockerfile subprocess.check_call(['docker', 'build', '-t',", "# at the end of the test suite, destroy the container subprocess.check_call(['docker', 'rm',", "= host.package(name) assert pkg.is_installed assert pkg.version.startswith(version) @pytest.mark.parametrize('name,version', [ ('Flask', '1.0.2'), ]) def test_pip_version(host,", "= pkgs[name] assert pkg assert pkg['version'] == version def test_sshd_disabled(host): try: sshd =", "docker_id]) @pytest.mark.parametrize('name,version', [ ('python3', '3.6.4'), ]) def test_container_version(host, name, version): pkg = host.package(name)", "]) def test_container_version(host, name, version): pkg = host.package(name) assert pkg.is_installed assert pkg.version.startswith(version) @pytest.mark.parametrize('name,version',", "suite, destroy the container subprocess.check_call(['docker', 'rm', '-f', docker_id]) @pytest.mark.parametrize('name,version', [ ('python3', '3.6.4'), ])", "# scope='session' uses the same container for all the tests; # scope='function' uses", "subprocess import testinfra import pprint # scope='session' uses the same container for all", "subprocess.check_call(['docker', 'build', '-t', 'web', '.']) # run a container docker_id = subprocess.check_output( ['docker',", "testinfra.get_host(\"docker://\" + docker_id) # at the end of the test suite, destroy the", "try: sshd = host.service('sshd') assert not sshd.is_running return except: return pytest.fail('sshd should not", "destroy the container subprocess.check_call(['docker', 'rm', '-f', docker_id]) @pytest.mark.parametrize('name,version', [ ('python3', '3.6.4'), ]) def", "'1.0.2'), ]) def test_pip_version(host, name, version): pkgs = host.pip_package.get_packages() pkg = pkgs[name] assert", "return a testinfra connection to the container yield testinfra.get_host(\"docker://\" + docker_id) # at", "[ ('python3', '3.6.4'), ]) def test_container_version(host, name, version): pkg = host.package(name) assert pkg.is_installed", "for all the tests; # scope='function' uses a new container per test function.", "'web', '.']) # run a container docker_id = subprocess.check_output( ['docker', 'run', '-d', 'web']).decode().strip()", "./Dockerfile subprocess.check_call(['docker', 'build', '-t', 'web', '.']) # run a container docker_id = subprocess.check_output(", "pkg = pkgs[name] assert pkg assert pkg['version'] == version def test_sshd_disabled(host): try: sshd", "'build', '-t', 'web', '.']) # run a container docker_id = subprocess.check_output( ['docker', 'run',", "'run', '-d', 'web']).decode().strip() # return a testinfra connection to the container yield testinfra.get_host(\"docker://\"", 
"pkgs = host.pip_package.get_packages() pkg = pkgs[name] assert pkg assert pkg['version'] == version def", "container subprocess.check_call(['docker', 'rm', '-f', docker_id]) @pytest.mark.parametrize('name,version', [ ('python3', '3.6.4'), ]) def test_container_version(host, name,", "sshd = host.service('sshd') assert not sshd.is_running return except: return pytest.fail('sshd should not be", "import pprint # scope='session' uses the same container for all the tests; #", "subprocess.check_output( ['docker', 'run', '-d', 'web']).decode().strip() # return a testinfra connection to the container", "assert pkg.is_installed assert pkg.version.startswith(version) @pytest.mark.parametrize('name,version', [ ('Flask', '1.0.2'), ]) def test_pip_version(host, name, version):", "version def test_sshd_disabled(host): try: sshd = host.service('sshd') assert not sshd.is_running return except: return", "== version def test_sshd_disabled(host): try: sshd = host.service('sshd') assert not sshd.is_running return except:", "host.package(name) assert pkg.is_installed assert pkg.version.startswith(version) @pytest.mark.parametrize('name,version', [ ('Flask', '1.0.2'), ]) def test_pip_version(host, name,", "all the tests; # scope='function' uses a new container per test function. @pytest.fixture(scope='session')", "scope='function' uses a new container per test function. @pytest.fixture(scope='session') def host(request): # build", "the container yield testinfra.get_host(\"docker://\" + docker_id) # at the end of the test", "container yield testinfra.get_host(\"docker://\" + docker_id) # at the end of the test suite,", "= subprocess.check_output( ['docker', 'run', '-d', 'web']).decode().strip() # return a testinfra connection to the", "'-f', docker_id]) @pytest.mark.parametrize('name,version', [ ('python3', '3.6.4'), ]) def test_container_version(host, name, version): pkg =", "version): pkgs = host.pip_package.get_packages() pkg = pkgs[name] assert pkg assert pkg['version'] == version", "the same container for all the tests; # scope='function' uses a new container", "a testinfra connection to the container yield testinfra.get_host(\"docker://\" + docker_id) # at the", "pkg.version.startswith(version) @pytest.mark.parametrize('name,version', [ ('Flask', '1.0.2'), ]) def test_pip_version(host, name, version): pkgs = host.pip_package.get_packages()", "test_container_version(host, name, version): pkg = host.package(name) assert pkg.is_installed assert pkg.version.startswith(version) @pytest.mark.parametrize('name,version', [ ('Flask',", "('Flask', '1.0.2'), ]) def test_pip_version(host, name, version): pkgs = host.pip_package.get_packages() pkg = pkgs[name]", "# return a testinfra connection to the container yield testinfra.get_host(\"docker://\" + docker_id) #", "testinfra import pprint # scope='session' uses the same container for all the tests;", "host(request): # build local ./Dockerfile subprocess.check_call(['docker', 'build', '-t', 'web', '.']) # run a", "@pytest.mark.parametrize('name,version', [ ('Flask', '1.0.2'), ]) def test_pip_version(host, name, version): pkgs = host.pip_package.get_packages() pkg", "of the test suite, destroy the container subprocess.check_call(['docker', 'rm', '-f', docker_id]) @pytest.mark.parametrize('name,version', [", "assert pkg.version.startswith(version) @pytest.mark.parametrize('name,version', [ ('Flask', '1.0.2'), ]) def test_pip_version(host, name, version): pkgs =", "subprocess.check_call(['docker', 'rm', '-f', docker_id]) @pytest.mark.parametrize('name,version', [ 
('python3', '3.6.4'), ]) def test_container_version(host, name, version):", "yield testinfra.get_host(\"docker://\" + docker_id) # at the end of the test suite, destroy", "# scope='function' uses a new container per test function. @pytest.fixture(scope='session') def host(request): #", "docker_id = subprocess.check_output( ['docker', 'run', '-d', 'web']).decode().strip() # return a testinfra connection to", "container for all the tests; # scope='function' uses a new container per test", "test function. @pytest.fixture(scope='session') def host(request): # build local ./Dockerfile subprocess.check_call(['docker', 'build', '-t', 'web',", "run a container docker_id = subprocess.check_output( ['docker', 'run', '-d', 'web']).decode().strip() # return a", "connection to the container yield testinfra.get_host(\"docker://\" + docker_id) # at the end of", "docker_id) # at the end of the test suite, destroy the container subprocess.check_call(['docker',", "local ./Dockerfile subprocess.check_call(['docker', 'build', '-t', 'web', '.']) # run a container docker_id =", "[ ('Flask', '1.0.2'), ]) def test_pip_version(host, name, version): pkgs = host.pip_package.get_packages() pkg =", "]) def test_pip_version(host, name, version): pkgs = host.pip_package.get_packages() pkg = pkgs[name] assert pkg", "def test_pip_version(host, name, version): pkgs = host.pip_package.get_packages() pkg = pkgs[name] assert pkg assert", "scope='session' uses the same container for all the tests; # scope='function' uses a", "build local ./Dockerfile subprocess.check_call(['docker', 'build', '-t', 'web', '.']) # run a container docker_id", "new container per test function. @pytest.fixture(scope='session') def host(request): # build local ./Dockerfile subprocess.check_call(['docker',", "pkg assert pkg['version'] == version def test_sshd_disabled(host): try: sshd = host.service('sshd') assert not", "'rm', '-f', docker_id]) @pytest.mark.parametrize('name,version', [ ('python3', '3.6.4'), ]) def test_container_version(host, name, version): pkg", "'-d', 'web']).decode().strip() # return a testinfra connection to the container yield testinfra.get_host(\"docker://\" +", "to the container yield testinfra.get_host(\"docker://\" + docker_id) # at the end of the", "pkgs[name] assert pkg assert pkg['version'] == version def test_sshd_disabled(host): try: sshd = host.service('sshd')", "at the end of the test suite, destroy the container subprocess.check_call(['docker', 'rm', '-f',", "+ docker_id) # at the end of the test suite, destroy the container", "name, version): pkgs = host.pip_package.get_packages() pkg = pkgs[name] assert pkg assert pkg['version'] ==", "host.pip_package.get_packages() pkg = pkgs[name] assert pkg assert pkg['version'] == version def test_sshd_disabled(host): try:", "test_pip_version(host, name, version): pkgs = host.pip_package.get_packages() pkg = pkgs[name] assert pkg assert pkg['version']", "same container for all the tests; # scope='function' uses a new container per", "@pytest.mark.parametrize('name,version', [ ('python3', '3.6.4'), ]) def test_container_version(host, name, version): pkg = host.package(name) assert", "import subprocess import testinfra import pprint # scope='session' uses the same container for", "the container subprocess.check_call(['docker', 'rm', '-f', docker_id]) @pytest.mark.parametrize('name,version', [ ('python3', '3.6.4'), ]) def test_container_version(host,", "container per test function. 
@pytest.fixture(scope='session') def host(request): # build local ./Dockerfile subprocess.check_call(['docker', 'build',", "= host.service('sshd') assert not sshd.is_running return except: return pytest.fail('sshd should not be running')", "'3.6.4'), ]) def test_container_version(host, name, version): pkg = host.package(name) assert pkg.is_installed assert pkg.version.startswith(version)", "pkg['version'] == version def test_sshd_disabled(host): try: sshd = host.service('sshd') assert not sshd.is_running return", "testinfra connection to the container yield testinfra.get_host(\"docker://\" + docker_id) # at the end", "tests; # scope='function' uses a new container per test function. @pytest.fixture(scope='session') def host(request):", "end of the test suite, destroy the container subprocess.check_call(['docker', 'rm', '-f', docker_id]) @pytest.mark.parametrize('name,version',", "pprint # scope='session' uses the same container for all the tests; # scope='function'", "# run a container docker_id = subprocess.check_output( ['docker', 'run', '-d', 'web']).decode().strip() # return", "the end of the test suite, destroy the container subprocess.check_call(['docker', 'rm', '-f', docker_id])", "'web']).decode().strip() # return a testinfra connection to the container yield testinfra.get_host(\"docker://\" + docker_id)", "name, version): pkg = host.package(name) assert pkg.is_installed assert pkg.version.startswith(version) @pytest.mark.parametrize('name,version', [ ('Flask', '1.0.2'),", "uses a new container per test function. @pytest.fixture(scope='session') def host(request): # build local", "container docker_id = subprocess.check_output( ['docker', 'run', '-d', 'web']).decode().strip() # return a testinfra connection", "version): pkg = host.package(name) assert pkg.is_installed assert pkg.version.startswith(version) @pytest.mark.parametrize('name,version', [ ('Flask', '1.0.2'), ])", "test suite, destroy the container subprocess.check_call(['docker', 'rm', '-f', docker_id]) @pytest.mark.parametrize('name,version', [ ('python3', '3.6.4'),", "assert pkg assert pkg['version'] == version def test_sshd_disabled(host): try: sshd = host.service('sshd') assert", "pytest import subprocess import testinfra import pprint # scope='session' uses the same container", "assert pkg['version'] == version def test_sshd_disabled(host): try: sshd = host.service('sshd') assert not sshd.is_running", "def host(request): # build local ./Dockerfile subprocess.check_call(['docker', 'build', '-t', 'web', '.']) # run", "import testinfra import pprint # scope='session' uses the same container for all the", "# build local ./Dockerfile subprocess.check_call(['docker', 'build', '-t', 'web', '.']) # run a container", "= host.pip_package.get_packages() pkg = pkgs[name] assert pkg assert pkg['version'] == version def test_sshd_disabled(host):", "'-t', 'web', '.']) # run a container docker_id = subprocess.check_output( ['docker', 'run', '-d',", "function. 
@pytest.fixture(scope='session') def host(request): # build local ./Dockerfile subprocess.check_call(['docker', 'build', '-t', 'web', '.'])", "@pytest.fixture(scope='session') def host(request): # build local ./Dockerfile subprocess.check_call(['docker', 'build', '-t', 'web', '.']) #", "import pytest import subprocess import testinfra import pprint # scope='session' uses the same", "a container docker_id = subprocess.check_output( ['docker', 'run', '-d', 'web']).decode().strip() # return a testinfra", "pkg.is_installed assert pkg.version.startswith(version) @pytest.mark.parametrize('name,version', [ ('Flask', '1.0.2'), ]) def test_pip_version(host, name, version): pkgs", "def test_container_version(host, name, version): pkg = host.package(name) assert pkg.is_installed assert pkg.version.startswith(version) @pytest.mark.parametrize('name,version', [", "['docker', 'run', '-d', 'web']).decode().strip() # return a testinfra connection to the container yield", "'.']) # run a container docker_id = subprocess.check_output( ['docker', 'run', '-d', 'web']).decode().strip() #", "uses the same container for all the tests; # scope='function' uses a new", "a new container per test function. @pytest.fixture(scope='session') def host(request): # build local ./Dockerfile" ]
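# A hedged usage note: with pytest and testinfra installed and a reachable
# Docker daemon, the suite can be run directly (the file name below is
# hypothetical):
#
#   pip install pytest testinfra
#   pytest -v test_image.py
#
# Because host is session-scoped, the image is built and the container
# started once for the whole run, then force-removed during teardown.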